diff --git a/.gitattributes b/.gitattributes index 1196d00ac52a642380556f67356a94f7cd6017f5..94c24d8df7f82a89be957fca947782500153f132 100644 --- a/.gitattributes +++ b/.gitattributes @@ -36,3 +36,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text train_hnet_with_docstring_18_04/wandb/run-20260417_085757-sa79g3yl/run-sa79g3yl.wandb filter=lfs diff=lfs merge=lfs -text wandb/run-20260418_121916-2mk39j3k/run-2mk39j3k.wandb filter=lfs diff=lfs merge=lfs -text pythia1b_v5_04_21/wandb/run-20260421_202839-8ing6xdi/run-8ing6xdi.wandb filter=lfs diff=lfs merge=lfs -text +lr_sweep/hnet_xl_code_lr_5e-5/wandb/run-20260425_174418-uk7c7595/run-uk7c7595.wandb filter=lfs diff=lfs merge=lfs -text +lr_sweep/hnet_xl_code_lr_5e-5/wandb/run-20260425_180603-5xd22ofy/run-5xd22ofy.wandb filter=lfs diff=lfs merge=lfs -text +lr_sweep/pythia_1b_lr_1e-5/wandb/run-20260425_180609-3z5g26qd/run-3z5g26qd.wandb filter=lfs diff=lfs merge=lfs -text +lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/run-bhvwo83l.wandb filter=lfs diff=lfs merge=lfs -text +lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/run-vg3if73m.wandb filter=lfs diff=lfs merge=lfs -text diff --git a/lr_sweep/hnet_xl_code_lr_1e-4/.hydra/config.yaml b/lr_sweep/hnet_xl_code_lr_1e-4/.hydra/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d2ce6882f9550d8945d2e4ea8cbfe69bfbc8e1d9 --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_1e-4/.hydra/config.yaml @@ -0,0 +1,55 @@ +model: + config_path: ${oc.env:PROJECT_ROOT}/hnet_project/configs/hnet_2stage_XL_code.json + checkpoint_path: ${oc.env:PROJECT_ROOT}/hnet_project/checkpoints/hnet_2stage_XL_code.pt +training: + epochs: 1 + batch_size: 4 + eval_batch_size: 24 + gradient_accumulation_steps: 4 + lr: 0.0001 + weight_decay: 0.1 + betas: + - 0.9 + - 0.95 + eps: 1.0e-08 + lr_scheduler: wsd + warmup_ratio: 0.1 + decay_ratio: 0.2 + warmup_steps: 100 + min_lr_ratio: 0.1 + lr_multiplier: + - 2.0 + - 1.5 + - 1.0 + load_balancing_weight: 0.01 + load_balancing_N: 4.0 + max_grad_norm: 1.0 + use_amp: true + resume: false + resume_checkpoint: null + warmup_model: true +data: + path: /workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full + max_context_len: 4096 + max_target_len: 256 + num_workers: 0 + pin_memory: true + max_train_samples: null + max_val_samples: 2000 +logging: + log_interval: 10 + save_interval: 0 + eval_interval: 2000 + save_every_epoch: false +tracking: + enabled: true + backend: wandb + project: code-completion_lr-sweep + run_name: hnet_xl_code_lr_1e-4 + entity: null + base_url: https://wandb.platun0v.ru + local_dir: ${paths.output_dir} +paths: + output_dir: /workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4 +seed: 42 +device: cuda diff --git a/lr_sweep/hnet_xl_code_lr_1e-4/.hydra/hydra.yaml b/lr_sweep/hnet_xl_code_lr_1e-4/.hydra/hydra.yaml new file mode 100644 index 0000000000000000000000000000000000000000..68a7a42e57ac911bc33d7bae6b51b9a8b324014d --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_1e-4/.hydra/hydra.yaml @@ -0,0 +1,166 @@ +hydra: + run: + dir: ${paths.output_dir} + sweep: + dir: outputs/multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. 
+ + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][HYDRA] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + loggers: + logging_example: + level: DEBUG + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: RUN + searchpath: [] + callbacks: {} + output_subdir: .hydra + overrides: + hydra: + - hydra.mode=RUN + task: + - tracking=wandb + - tracking.project=code-completion_lr-sweep + - tracking.run_name=hnet_xl_code_lr_1e-4 + - training.lr=1e-4 + - paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4 + - data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full + job: + name: train + chdir: false + override_dirname: data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full,paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4,tracking.project=code-completion_lr-sweep,tracking.run_name=hnet_xl_code_lr_1e-4,tracking=wandb,training.lr=1e-4 + id: ??? + num: ??? 
+ config_name: config + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /workspace/byte-llms-code/code_completion_exp/train_hnet + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /workspace/byte-llms-code/code_completion_exp/train_hnet/configs + schema: file + provider: main + - path: '' + schema: structured + provider: schema + output_dir: /workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4 + choices: + paths: default + tracking: wandb + logging: default + data: default + training: default + model: hnet_xl_code + hydra/env: default + hydra/callbacks: null + hydra/job_logging: default + hydra/hydra_logging: default + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/lr_sweep/hnet_xl_code_lr_1e-4/.hydra/overrides.yaml b/lr_sweep/hnet_xl_code_lr_1e-4/.hydra/overrides.yaml new file mode 100644 index 0000000000000000000000000000000000000000..70173c369c5c5f2ce124dead715224a06bf43b89 --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_1e-4/.hydra/overrides.yaml @@ -0,0 +1,6 @@ +- tracking=wandb +- tracking.project=code-completion_lr-sweep +- tracking.run_name=hnet_xl_code_lr_1e-4 +- training.lr=1e-4 +- paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4 +- data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full diff --git a/lr_sweep/hnet_xl_code_lr_1e-4/model_best.pt b/lr_sweep/hnet_xl_code_lr_1e-4/model_best.pt new file mode 100644 index 0000000000000000000000000000000000000000..2924170400933acb8519f089e95ba0d0d59300b7 --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_1e-4/model_best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:deea8f88df1af58f18b8f52ea076900685df2c50b875a74312cae2403ef161b9 +size 3315165139 diff --git a/lr_sweep/hnet_xl_code_lr_1e-4/wandb/debug-internal.log b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..c84a25cfd2c84ac88ee46273caecb4cea70dfbb3 --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/debug-internal.log @@ -0,0 +1,15 @@ +{"time":"2026-04-25T20:07:22.546134919Z","level":"INFO","msg":"stream: starting","core version":"0.24.0"} +{"time":"2026-04-25T20:07:23.113751252Z","level":"INFO","msg":"stream: created new stream","id":"d5usyud5"} +{"time":"2026-04-25T20:07:23.113818732Z","level":"INFO","msg":"handler: started","stream_id":"d5usyud5"} +{"time":"2026-04-25T20:07:23.113942767Z","level":"INFO","msg":"stream: started","id":"d5usyud5"} +{"time":"2026-04-25T20:07:23.113948903Z","level":"INFO","msg":"writer: started","stream_id":"d5usyud5"} +{"time":"2026-04-25T20:07:23.113961948Z","level":"INFO","msg":"sender: started","stream_id":"d5usyud5"} +{"time":"2026-04-25T20:07:23.299850805Z","level":"ERROR","msg":"git repo not found","error":"repository does not exist"} +{"time":"2026-04-25T20:21:33.568962377Z","level":"ERROR","msg":"api: HTTP error","status":403,"method":"POST","url":"https://wandb.platun0v.ru/files/nikita/code-completion_lr-sweep/d5usyud5/file_stream"} +{"time":"2026-04-25T20:21:33.569055125Z","level":"ERROR+4","msg":"filestream: fatal error: filestream: failed to upload: 403 Forbidden url=https://wandb.platun0v.ru/files/nikita/code-completion_lr-sweep/d5usyud5/file_stream: "} +{"time":"2026-04-25T22:06:42.530286738Z","level":"INFO","msg":"fileTransfer: Close: 
file transfer manager closed"} +{"time":"2026-04-25T22:06:42.531121888Z","level":"INFO","msg":"handler: operation stats","stats":{}} +{"time":"2026-04-25T22:06:42.533763478Z","level":"INFO","msg":"stream: closing","id":"d5usyud5"} +{"time":"2026-04-25T22:06:42.533774521Z","level":"INFO","msg":"handler: closed","stream_id":"d5usyud5"} +{"time":"2026-04-25T22:06:42.533874231Z","level":"INFO","msg":"sender: closed","stream_id":"d5usyud5"} +{"time":"2026-04-25T22:06:42.533879555Z","level":"INFO","msg":"stream: closed","id":"d5usyud5"} diff --git a/lr_sweep/hnet_xl_code_lr_1e-4/wandb/debug.log b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..6ec4982c30f0e9935b4b15155def037cc99ac484 --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/debug.log @@ -0,0 +1,24 @@ +2026-04-25 20:07:22,254 INFO MainThread:126864 [wandb_setup.py:_flush():81] Current SDK version is 0.24.0 +2026-04-25 20:07:22,254 INFO MainThread:126864 [wandb_setup.py:_flush():81] Configure stats pid to 126864 +2026-04-25 20:07:22,254 INFO MainThread:126864 [wandb_setup.py:_flush():81] Loading settings from environment variables +2026-04-25 20:07:22,254 INFO MainThread:126864 [wandb_init.py:setup_run_log_directory():717] Logging user logs to /workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/logs/debug.log +2026-04-25 20:07:22,254 INFO MainThread:126864 [wandb_init.py:setup_run_log_directory():718] Logging internal logs to /workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/logs/debug-internal.log +2026-04-25 20:07:22,254 INFO MainThread:126864 [wandb_init.py:init():844] calling init triggers +2026-04-25 20:07:22,255 INFO MainThread:126864 [wandb_init.py:init():849] wandb.init called with sweep_config: {} +config: {'model': {'config_path': '/workspace/byte-llms-code/hnet_project/configs/hnet_2stage_XL_code.json', 'checkpoint_path': '/workspace/byte-llms-code/hnet_project/checkpoints/hnet_2stage_XL_code.pt'}, 'training': {'epochs': 1, 'batch_size': 4, 'eval_batch_size': 24, 'gradient_accumulation_steps': 4, 'lr': 0.0001, 'weight_decay': 0.1, 'betas': [0.9, 0.95], 'eps': 1e-08, 'lr_scheduler': 'wsd', 'warmup_ratio': 0.1, 'decay_ratio': 0.2, 'warmup_steps': 100, 'min_lr_ratio': 0.1, 'lr_multiplier': [2.0, 1.5, 1.0], 'load_balancing_weight': 0.01, 'load_balancing_N': 4.0, 'max_grad_norm': 1.0, 'use_amp': True, 'resume': False, 'resume_checkpoint': None, 'warmup_model': True}, 'data': {'path': '/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full', 'max_context_len': 4096, 'max_target_len': 256, 'num_workers': 0, 'pin_memory': True, 'max_train_samples': None, 'max_val_samples': 2000}, 'logging': {'log_interval': 10, 'save_interval': 0, 'eval_interval': 2000, 'save_every_epoch': False}, 'tracking': {'enabled': True, 'backend': 'wandb', 'project': 'code-completion_lr-sweep', 'run_name': 'hnet_xl_code_lr_1e-4', 'entity': None, 'base_url': 'https://wandb.platun0v.ru', 'local_dir': '/workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4'}, 'paths': {'output_dir': '/workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4'}, 'seed': 42, 'device': 'cuda', '_wandb': {'code_path': 'code/code_completion_exp/train_hnet/train.py'}} +2026-04-25 20:07:22,255 INFO MainThread:126864 [wandb_init.py:init():892] starting backend +2026-04-25 20:07:22,524 INFO MainThread:126864 [wandb_init.py:init():895] sending inform_init request +2026-04-25 20:07:22,544 INFO 
MainThread:126864 [wandb_init.py:init():903] backend started and connected +2026-04-25 20:07:22,547 INFO MainThread:126864 [wandb_init.py:init():973] updated telemetry +2026-04-25 20:07:22,566 INFO MainThread:126864 [wandb_init.py:init():997] communicating run to backend with 90.0 second timeout +2026-04-25 20:07:23,298 INFO MainThread:126864 [wandb_init.py:init():1044] starting run threads in backend +2026-04-25 20:07:23,458 INFO MainThread:126864 [wandb_run.py:_console_start():2529] atexit reg +2026-04-25 20:07:23,458 INFO MainThread:126864 [wandb_run.py:_redirect():2377] redirect: wrap_raw +2026-04-25 20:07:23,458 INFO MainThread:126864 [wandb_run.py:_redirect():2446] Wrapping output streams. +2026-04-25 20:07:23,458 INFO MainThread:126864 [wandb_run.py:_redirect():2469] Redirects installed. +2026-04-25 20:07:23,461 INFO MainThread:126864 [wandb_init.py:init():1084] run started, returning control to user process +2026-04-25 22:06:41,457 INFO MainThread:126864 [wandb_run.py:_finish():2295] finishing run nikita/code-completion_lr-sweep/d5usyud5 +2026-04-25 22:06:41,458 INFO MainThread:126864 [wandb_run.py:_atexit_cleanup():2494] got exitcode: 0 +2026-04-25 22:06:41,458 INFO MainThread:126864 [wandb_run.py:_restore():2476] restore +2026-04-25 22:06:41,458 INFO MainThread:126864 [wandb_run.py:_restore():2482] restore done +2026-04-25 22:06:42,533 INFO MainThread:126864 [wandb_run.py:_footer_sync_info():3870] logging synced files diff --git a/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/files/requirements.txt b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f040f697230340f8a88a6e7387f7e8983d11b547 --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/files/requirements.txt @@ -0,0 +1,245 @@ +setuptools==78.1.1 +wheel==0.45.1 +pip==25.2 +webencodings==0.5.1 +triton==3.2.0 +pytz==2025.2 +pydub==0.25.1 +pure_eval==0.2.3 +ptyprocess==0.7.0 +nvidia-ml-py==13.590.48 +nvidia-cusparselt-cu12==0.6.2 +mpmath==1.3.0 +ipython-genutils==0.2.0 +fastjsonschema==2.21.2 +brotli==1.2.0 +antlr4-python3-runtime==4.9.3 +xxhash==3.6.0 +widgetsnbextension==4.0.14 +websocket-client==1.9.0 +webcolors==24.11.1 +wcwidth==0.2.14 +urllib3==2.5.0 +uri-template==1.3.0 +tzdata==2025.2 +typing_extensions==4.15.0 +types-python-dateutil==2.9.0.20251008 +traitlets==5.14.3 +tqdm==4.67.1 +tornado==6.5.2 +tomlkit==0.13.3 +tinycss2==1.4.0 +tabulate==0.9.0 +sympy==1.13.1 +soupsieve==2.8 +sniffio==1.3.1 +smmap==5.0.2 +six==1.17.0 +shellingham==1.5.4 +Send2Trash==1.8.3 +semantic-version==2.10.0 +safetensors==0.6.2 +rpds-py==0.27.1 +rfc3986-validator==0.1.1 +regex==2025.9.18 +pyzmq==27.1.0 +PyYAML==6.0.3 +python-multipart==0.0.22 +python-json-logger==4.0.0 +python-dotenv==1.2.1 +pyparsing==3.2.5 +PyJWT==2.8.0 +Pygments==2.19.2 +pycparser==2.23 +pyarrow==22.0.0 +psutil==7.1.0 +protobuf==6.33.4 +propcache==0.4.1 +prometheus_client==0.23.1 +portalocker==3.2.0 +platformdirs==4.5.0 +pillow==11.3.0 +pexpect==4.9.0 +pathspec==1.0.4 +parso==0.8.5 +pandocfilters==1.5.1 +packaging==25.0 +orjson==3.11.6 +opt_einsum==3.4.0 +nvidia-nvtx-cu12==12.4.127 +nvidia-nvjitlink-cu12==12.4.127 +nvidia-nccl-cu12==2.21.5 +nvidia-curand-cu12==10.3.5.147 +nvidia-cufile-cu12==1.13.1.3 +nvidia-cufft-cu12==11.2.1.3 +nvidia-cuda-runtime-cu12==12.4.127 +nvidia-cuda-nvrtc-cu12==12.4.127 +nvidia-cuda-cupti-cu12==12.4.127 +nvidia-cublas-cu12==12.4.5.8 +numpy==2.3.3 +ninja==1.13.0 +networkx==3.5 
+nest-asyncio==1.6.0 +narwhals==2.15.0 +mypy_extensions==1.1.0 +multidict==6.7.0 +mistune==3.1.4 +mdurl==0.1.2 +MarkupSafe==3.0.3 +lxml==6.0.2 +librt==0.8.0 +lark==1.3.0 +kiwisolver==1.4.9 +jupyterlab_widgets==3.0.15 +jupyterlab_pygments==0.3.0 +jsonpointer==3.0.0 +json5==0.12.1 +itsdangerous==2.2.0 +idna==3.10 +hf-xet==1.1.10 +h11==0.16.0 +groovy==0.1.2 +fsspec==2025.9.0 +frozenlist==1.8.0 +fqdn==1.5.1 +fonttools==4.60.1 +filelock==3.19.1 +ffmpy==1.0.0 +executing==2.2.1 +einops==0.8.1 +dill==0.4.0 +defusedxml==0.7.1 +decorator==5.2.1 +debugpy==1.8.17 +dacite==1.9.2 +cycler==0.12.1 +comm==0.2.3 +colorama==0.4.6 +click==8.3.1 +charset-normalizer==3.4.3 +certifi==2025.10.5 +bleach==6.2.0 +babel==2.17.0 +attrs==25.4.0 +async-lru==2.0.5 +asttokens==3.0.0 +annotated-types==0.7.0 +annotated-doc==0.0.4 +aiohappyeyeballs==2.6.1 +aiofiles==24.1.0 +yarl==1.22.0 +uvicorn==0.40.0 +typing-inspection==0.4.2 +terminado==0.18.1 +stack-data==0.6.3 +sentry-sdk==2.50.0 +scipy==1.17.0 +sacrebleu==2.6.0 +rfc3987-syntax==1.1.0 +rfc3339-validator==0.1.4 +requests==2.32.5 +reportlab==4.4.9 +referencing==0.36.2 +python-dateutil==2.9.0.post0 +pydantic_core==2.41.5 +prompt_toolkit==3.0.52 +plotly==6.5.2 +pathlib2==2.3.7.post1 +orderedmultidict==1.0.2 +optree==0.17.0 +omegaconf==2.3.0 +nvidia-cusparse-cu12==12.3.1.170 +nvidia-cudnn-cu12==9.1.0.70 +mypy==1.19.1 +multiprocess==0.70.16 +matplotlib-inline==0.1.7 +markdown-it-py==4.0.0 +jupyter_core==5.8.1 +Jinja2==3.1.6 +jedi==0.19.2 +ipython_pygments_lexers==1.1.1 +httpcore==1.0.9 +gitdb==4.0.12 +ftfy==6.3.1 +contourpy==1.3.3 +cffi==2.0.0 +beautifulsoup4==4.14.2 +anyio==4.11.0 +aiosignal==1.4.0 +starlette==0.50.0 +rich==14.2.0 +pydantic==2.12.5 +pandas==2.3.3 +nvidia-cusolver-cu12==11.6.1.9 +matplotlib==3.10.7 +jupyter_server_terminals==0.5.3 +jupyter_client==8.6.3 +jsonschema-specifications==2025.9.1 +ipython==9.6.0 +hydra-core==1.3.2 +huggingface-hub==0.35.3 +httpx==0.28.1 +GitPython==3.1.46 +furl==2.1.4 +cryptography==46.0.4 +arrow==1.3.0 +argon2-cffi-bindings==25.1.0 +aiohttp==3.13.1 +wandb==0.24.0 +typer==0.21.1 +torch==2.6.0 +tokenizers==0.22.1 +seaborn==0.13.2 +safehttpx==0.1.7 +jsonschema==4.25.1 +joypy==0.2.6 +isoduration==20.11.0 +ipywidgets==8.1.7 +ipykernel==6.30.1 +gradio_client==2.0.3 +fastapi==0.128.0 +Authlib==1.6.6 +argon2-cffi==25.1.0 +transformers==4.57.6 +nbformat==5.10.4 +mlstm_kernels==2.0.2 +jupyter-console==6.6.3 +gradio==6.5.1 +datasets==4.3.0 +clearml==1.16.4 +accelerate==1.10.1 +xlstm==2.0.4 +nbclient==0.10.2 +jupyter-events==0.12.0 +trackio==0.15.0 +nbconvert==7.16.6 +jupyter_server==2.17.0 +notebook_shim==0.2.4 +jupyterlab_server==2.27.3 +jupyter-lsp==2.3.0 +nbclassic==1.3.3 +jupyterlab==4.4.9 +notebook==7.4.7 +jupyter_contrib_core==0.4.2 +jupyter==1.1.1 +jupyter_nbextensions_configurator==0.6.4 +causal-conv1d==1.5.0.post8 +flash_attn==2.7.4.post1 +mamba-ssm==2.2.4 +hnet==0.0.1 +autocommand==2.2.2 +backports.tarfile==1.2.0 +importlib_metadata==8.0.0 +inflect==7.3.1 +jaraco.collections==5.1.0 +jaraco.context==5.3.0 +jaraco.functools==4.0.1 +jaraco.text==3.12.1 +more-itertools==10.3.0 +packaging==24.2 +platformdirs==4.2.2 +tomli==2.0.1 +typeguard==4.3.0 +typing_extensions==4.12.2 +wheel==0.45.1 +zipp==3.19.2 diff --git a/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/files/wandb-metadata.json b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..2b532aad359b7627efde97e6fe3ec7e410be609d --- /dev/null +++ 
b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/files/wandb-metadata.json @@ -0,0 +1 @@ +{"os": "Linux-5.4.0-176-generic-x86_64-with-glibc2.35", "python": "CPython 3.12.0", "started_at": "2026-04-25T20:07:22.253382Z", "args": ["tracking=wandb", "tracking.project=code-completion_lr-sweep", "tracking.run_name=hnet_xl_code_lr_1e-4", "training.lr=1e-4", "paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4", "data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full"], "program": "/workspace/byte-llms-code/code_completion_exp/train_hnet/train.py", "code_path": "code_completion_exp/train_hnet/train.py", "code_path_local": "train.py", "git": {"remote_url": "https://github.com/naryst/byte-llms-code.git", "commit": "f111e13281aa0dc58e24302edab5b0d5c2024586"}, "email": "nikita@local.ru", "root": "/workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4", "host": "7504e518d24a", "executable": "/venv/bytellm/bin/python", "cpu_count": 64, "cpu_count_logical": 128, "gpu_type": "NVIDIA H100 80GB HBM3", "gpu_count": 4, "disk": {"/": {"total": "265214230528", "used": "104071081984"}}, "memory": {"total": "1081679683584"}, "gpu_nvidia": [{"name": "NVIDIA H100 80GB HBM3", "memory_total": "85520809984", "cuda_cores": 16896, "architecture": "Hopper", "uuid": "GPU-b60cdcab-2033-2009-41de-be646c953a20"}, {"name": "NVIDIA H100 80GB HBM3", "memory_total": "85520809984", "cuda_cores": 16896, "architecture": "Hopper", "uuid": "GPU-9982b420-4520-4238-c378-ec5a46015474"}, {"name": "NVIDIA H100 80GB HBM3", "memory_total": "85520809984", "cuda_cores": 16896, "architecture": "Hopper", "uuid": "GPU-e26ebaac-aaa6-3eed-17ab-a3dce303a76f"}, {"name": "NVIDIA H100 80GB HBM3", "memory_total": "85520809984", "cuda_cores": 16896, "architecture": "Hopper", "uuid": "GPU-9dfc6dba-0be6-4a10-1027-336cc0e65134"}], "cuda_version": "12.2", "writer_id": "yd4im4gytbm7o9yud168kac4xfyaj2kg"} \ No newline at end of file diff --git a/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/logs/debug-core.log b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/logs/debug-core.log new file mode 100644 index 0000000000000000000000000000000000000000..1694e244c7e3283f988df7c712057874d1983c8e --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/logs/debug-core.log @@ -0,0 +1,16 @@ +{"time":"2026-04-25T20:07:22.336198949Z","level":"INFO","msg":"main: starting server","port-filename":"/tmp/tmp7x8yitda/port-126864.txt","pid":126864,"log-level":0,"disable-analytics":false,"shutdown-on-parent-exit":false,"enable-dcgm-profiling":false} +{"time":"2026-04-25T20:07:22.336658841Z","level":"INFO","msg":"server: will exit if parent process dies","ppid":126864} +{"time":"2026-04-25T20:07:22.336673483Z","level":"INFO","msg":"server: accepting connections","addr":{"Name":"/tmp/wandb-126864-126924-386692386/socket","Net":"unix"}} +{"time":"2026-04-25T20:07:22.52399421Z","level":"INFO","msg":"connection: ManageConnectionData: new connection created","id":"1(@)"} +{"time":"2026-04-25T20:07:22.546020162Z","level":"INFO","msg":"handleInformInit: received","streamId":"d5usyud5","id":"1(@)"} +{"time":"2026-04-25T20:07:23.113949668Z","level":"INFO","msg":"handleInformInit: stream started","streamId":"d5usyud5","id":"1(@)"} +{"time":"2026-04-25T22:06:42.533745721Z","level":"INFO","msg":"handleInformFinish: finish message received","streamId":"d5usyud5","id":"1(@)"} 
+{"time":"2026-04-25T22:06:42.534224957Z","level":"INFO","msg":"handleInformFinish: stream closed","streamId":"d5usyud5","id":"1(@)"} +{"time":"2026-04-25T22:06:42.545107466Z","level":"INFO","msg":"handleInformTeardown: server teardown initiated","id":"1(@)"} +{"time":"2026-04-25T22:06:42.54513839Z","level":"INFO","msg":"handleInformTeardown: server shutdown complete","id":"1(@)"} +{"time":"2026-04-25T22:06:42.545146482Z","level":"INFO","msg":"server is shutting down"} +{"time":"2026-04-25T22:06:42.545144273Z","level":"INFO","msg":"connection: closing","id":"1(@)"} +{"time":"2026-04-25T22:06:42.545195559Z","level":"INFO","msg":"connection: closed successfully","id":"1(@)"} +{"time":"2026-04-25T22:06:42.545209473Z","level":"INFO","msg":"connection: ManageConnectionData: connection closed","id":"1(@)"} +{"time":"2026-04-25T22:06:42.545207861Z","level":"INFO","msg":"server: listener closed","addr":{"Name":"/tmp/wandb-126864-126924-386692386/socket","Net":"unix"}} +{"time":"2026-04-25T22:06:42.545231828Z","level":"INFO","msg":"server is closed"} diff --git a/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/logs/debug-internal.log b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..c84a25cfd2c84ac88ee46273caecb4cea70dfbb3 --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/logs/debug-internal.log @@ -0,0 +1,15 @@ +{"time":"2026-04-25T20:07:22.546134919Z","level":"INFO","msg":"stream: starting","core version":"0.24.0"} +{"time":"2026-04-25T20:07:23.113751252Z","level":"INFO","msg":"stream: created new stream","id":"d5usyud5"} +{"time":"2026-04-25T20:07:23.113818732Z","level":"INFO","msg":"handler: started","stream_id":"d5usyud5"} +{"time":"2026-04-25T20:07:23.113942767Z","level":"INFO","msg":"stream: started","id":"d5usyud5"} +{"time":"2026-04-25T20:07:23.113948903Z","level":"INFO","msg":"writer: started","stream_id":"d5usyud5"} +{"time":"2026-04-25T20:07:23.113961948Z","level":"INFO","msg":"sender: started","stream_id":"d5usyud5"} +{"time":"2026-04-25T20:07:23.299850805Z","level":"ERROR","msg":"git repo not found","error":"repository does not exist"} +{"time":"2026-04-25T20:21:33.568962377Z","level":"ERROR","msg":"api: HTTP error","status":403,"method":"POST","url":"https://wandb.platun0v.ru/files/nikita/code-completion_lr-sweep/d5usyud5/file_stream"} +{"time":"2026-04-25T20:21:33.569055125Z","level":"ERROR+4","msg":"filestream: fatal error: filestream: failed to upload: 403 Forbidden url=https://wandb.platun0v.ru/files/nikita/code-completion_lr-sweep/d5usyud5/file_stream: "} +{"time":"2026-04-25T22:06:42.530286738Z","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"} +{"time":"2026-04-25T22:06:42.531121888Z","level":"INFO","msg":"handler: operation stats","stats":{}} +{"time":"2026-04-25T22:06:42.533763478Z","level":"INFO","msg":"stream: closing","id":"d5usyud5"} +{"time":"2026-04-25T22:06:42.533774521Z","level":"INFO","msg":"handler: closed","stream_id":"d5usyud5"} +{"time":"2026-04-25T22:06:42.533874231Z","level":"INFO","msg":"sender: closed","stream_id":"d5usyud5"} +{"time":"2026-04-25T22:06:42.533879555Z","level":"INFO","msg":"stream: closed","id":"d5usyud5"} diff --git a/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/logs/debug.log b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/logs/debug.log new file mode 100644 index 
0000000000000000000000000000000000000000..6ec4982c30f0e9935b4b15155def037cc99ac484 --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/logs/debug.log @@ -0,0 +1,24 @@ +2026-04-25 20:07:22,254 INFO MainThread:126864 [wandb_setup.py:_flush():81] Current SDK version is 0.24.0 +2026-04-25 20:07:22,254 INFO MainThread:126864 [wandb_setup.py:_flush():81] Configure stats pid to 126864 +2026-04-25 20:07:22,254 INFO MainThread:126864 [wandb_setup.py:_flush():81] Loading settings from environment variables +2026-04-25 20:07:22,254 INFO MainThread:126864 [wandb_init.py:setup_run_log_directory():717] Logging user logs to /workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/logs/debug.log +2026-04-25 20:07:22,254 INFO MainThread:126864 [wandb_init.py:setup_run_log_directory():718] Logging internal logs to /workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/logs/debug-internal.log +2026-04-25 20:07:22,254 INFO MainThread:126864 [wandb_init.py:init():844] calling init triggers +2026-04-25 20:07:22,255 INFO MainThread:126864 [wandb_init.py:init():849] wandb.init called with sweep_config: {} +config: {'model': {'config_path': '/workspace/byte-llms-code/hnet_project/configs/hnet_2stage_XL_code.json', 'checkpoint_path': '/workspace/byte-llms-code/hnet_project/checkpoints/hnet_2stage_XL_code.pt'}, 'training': {'epochs': 1, 'batch_size': 4, 'eval_batch_size': 24, 'gradient_accumulation_steps': 4, 'lr': 0.0001, 'weight_decay': 0.1, 'betas': [0.9, 0.95], 'eps': 1e-08, 'lr_scheduler': 'wsd', 'warmup_ratio': 0.1, 'decay_ratio': 0.2, 'warmup_steps': 100, 'min_lr_ratio': 0.1, 'lr_multiplier': [2.0, 1.5, 1.0], 'load_balancing_weight': 0.01, 'load_balancing_N': 4.0, 'max_grad_norm': 1.0, 'use_amp': True, 'resume': False, 'resume_checkpoint': None, 'warmup_model': True}, 'data': {'path': '/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full', 'max_context_len': 4096, 'max_target_len': 256, 'num_workers': 0, 'pin_memory': True, 'max_train_samples': None, 'max_val_samples': 2000}, 'logging': {'log_interval': 10, 'save_interval': 0, 'eval_interval': 2000, 'save_every_epoch': False}, 'tracking': {'enabled': True, 'backend': 'wandb', 'project': 'code-completion_lr-sweep', 'run_name': 'hnet_xl_code_lr_1e-4', 'entity': None, 'base_url': 'https://wandb.platun0v.ru', 'local_dir': '/workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4'}, 'paths': {'output_dir': '/workspace/byte-llms-code/outputs/lr_sweep/hnet_xl_code_lr_1e-4'}, 'seed': 42, 'device': 'cuda', '_wandb': {'code_path': 'code/code_completion_exp/train_hnet/train.py'}} +2026-04-25 20:07:22,255 INFO MainThread:126864 [wandb_init.py:init():892] starting backend +2026-04-25 20:07:22,524 INFO MainThread:126864 [wandb_init.py:init():895] sending inform_init request +2026-04-25 20:07:22,544 INFO MainThread:126864 [wandb_init.py:init():903] backend started and connected +2026-04-25 20:07:22,547 INFO MainThread:126864 [wandb_init.py:init():973] updated telemetry +2026-04-25 20:07:22,566 INFO MainThread:126864 [wandb_init.py:init():997] communicating run to backend with 90.0 second timeout +2026-04-25 20:07:23,298 INFO MainThread:126864 [wandb_init.py:init():1044] starting run threads in backend +2026-04-25 20:07:23,458 INFO MainThread:126864 [wandb_run.py:_console_start():2529] atexit reg +2026-04-25 20:07:23,458 INFO MainThread:126864 [wandb_run.py:_redirect():2377] redirect: wrap_raw +2026-04-25 20:07:23,458 INFO 
MainThread:126864 [wandb_run.py:_redirect():2446] Wrapping output streams. +2026-04-25 20:07:23,458 INFO MainThread:126864 [wandb_run.py:_redirect():2469] Redirects installed. +2026-04-25 20:07:23,461 INFO MainThread:126864 [wandb_init.py:init():1084] run started, returning control to user process +2026-04-25 22:06:41,457 INFO MainThread:126864 [wandb_run.py:_finish():2295] finishing run nikita/code-completion_lr-sweep/d5usyud5 +2026-04-25 22:06:41,458 INFO MainThread:126864 [wandb_run.py:_atexit_cleanup():2494] got exitcode: 0 +2026-04-25 22:06:41,458 INFO MainThread:126864 [wandb_run.py:_restore():2476] restore +2026-04-25 22:06:41,458 INFO MainThread:126864 [wandb_run.py:_restore():2482] restore done +2026-04-25 22:06:42,533 INFO MainThread:126864 [wandb_run.py:_footer_sync_info():3870] logging synced files diff --git a/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/run-d5usyud5.wandb.synced b/lr_sweep/hnet_xl_code_lr_1e-4/wandb/run-20260425_200722-d5usyud5/run-d5usyud5.wandb.synced new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lr_sweep/hnet_xl_code_lr_2e-4/model_best.pt b/lr_sweep/hnet_xl_code_lr_2e-4/model_best.pt new file mode 100644 index 0000000000000000000000000000000000000000..8b972de16f772b20b1f458fe5c3172b86823174d --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_2e-4/model_best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27da0445987873606937e8da90a139e715383c331433c8b343b1032d58a99dfe +size 3315165139 diff --git a/lr_sweep/hnet_xl_code_lr_2e-4/model_final.pt b/lr_sweep/hnet_xl_code_lr_2e-4/model_final.pt new file mode 100644 index 0000000000000000000000000000000000000000..3e6bbb517886e29587903077d8352c6bee48fd9e --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_2e-4/model_final.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1292d689e685f5021ba431a47098538f801f139a90d9b1f60e5bedf40ec8470d +size 3315165484 diff --git a/lr_sweep/hnet_xl_code_lr_5e-4/model_best.pt b/lr_sweep/hnet_xl_code_lr_5e-4/model_best.pt new file mode 100644 index 0000000000000000000000000000000000000000..41f3f87f493385026808edb5704309e6f4388c3e --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_5e-4/model_best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f59fb4a65838ee4c93c9a651d5208386e048b3366cd63030d4c3eca666b049d8 +size 3315165139 diff --git a/lr_sweep/hnet_xl_code_lr_5e-5/model_final.pt b/lr_sweep/hnet_xl_code_lr_5e-5/model_final.pt new file mode 100644 index 0000000000000000000000000000000000000000..3d6df02dfc7e8acaa9afc5e30437f1416c10ee78 --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_5e-5/model_final.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61995e858953dad795445f51298ad71f7c48b0c30f45e155c344d887ec54feee +size 3315165484 diff --git a/lr_sweep/hnet_xl_code_lr_5e-5/wandb/run-20260425_174418-uk7c7595/run-uk7c7595.wandb b/lr_sweep/hnet_xl_code_lr_5e-5/wandb/run-20260425_174418-uk7c7595/run-uk7c7595.wandb new file mode 100644 index 0000000000000000000000000000000000000000..ee2982af04068d49c3bf16714ce7f31bdf7ed1d8 --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_5e-5/wandb/run-20260425_174418-uk7c7595/run-uk7c7595.wandb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8649c797be4adc71350565c113790e396d320a4dab59ad0bfcfa9c52359f8136 +size 262144 diff --git a/lr_sweep/hnet_xl_code_lr_5e-5/wandb/run-20260425_180603-5xd22ofy/run-5xd22ofy.wandb 
b/lr_sweep/hnet_xl_code_lr_5e-5/wandb/run-20260425_180603-5xd22ofy/run-5xd22ofy.wandb new file mode 100644 index 0000000000000000000000000000000000000000..0ce2ec4a691ecf412cba5f2ccd67ef5c3296e3c0 --- /dev/null +++ b/lr_sweep/hnet_xl_code_lr_5e-5/wandb/run-20260425_180603-5xd22ofy/run-5xd22ofy.wandb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f036c6bc8d1a639417b0a0dcee63aba1538252d56c9909985694b8a9d495fe4b +size 3090453 diff --git a/lr_sweep/pythia_1b_lr_1e-4/model_best.pt b/lr_sweep/pythia_1b_lr_1e-4/model_best.pt new file mode 100644 index 0000000000000000000000000000000000000000..65ad0a60d281c774b8f9f2ccf1a0eb20455cd22f --- /dev/null +++ b/lr_sweep/pythia_1b_lr_1e-4/model_best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:422582b324d72ddc359965f3e05900c619d037fc304bb1a79534a07aeab64b8c +size 2023640386 diff --git a/lr_sweep/pythia_1b_lr_1e-4/model_final.pt b/lr_sweep/pythia_1b_lr_1e-4/model_final.pt new file mode 100644 index 0000000000000000000000000000000000000000..19b96fd67ef0b6f947e35d34ddb722685eaf49ed --- /dev/null +++ b/lr_sweep/pythia_1b_lr_1e-4/model_final.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d63763bbe765f63c06d189c52d4f450b9fadeb50fc0b5d3a54bc3852e56b33a +size 2023640586 diff --git a/lr_sweep/pythia_1b_lr_1e-5/model_best.pt b/lr_sweep/pythia_1b_lr_1e-5/model_best.pt new file mode 100644 index 0000000000000000000000000000000000000000..f0e8138a9ce9096c8592948723d6609ba0663abd --- /dev/null +++ b/lr_sweep/pythia_1b_lr_1e-5/model_best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38de9490a221894949d64ba59563420c18526a5371942e0d25696954b1f043a6 +size 2023640386 diff --git a/lr_sweep/pythia_1b_lr_1e-5/model_final.pt b/lr_sweep/pythia_1b_lr_1e-5/model_final.pt new file mode 100644 index 0000000000000000000000000000000000000000..3aa7a679e1e4a5aff9523db3e2c7e9f1469557e9 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_1e-5/model_final.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7cd3fd070dc29831a36361bb6568ee3d1ea0ecafba18390f4f94edb6c5aed27 +size 2023640586 diff --git a/lr_sweep/pythia_1b_lr_1e-5/wandb/run-20260425_180609-3z5g26qd/run-3z5g26qd.wandb b/lr_sweep/pythia_1b_lr_1e-5/wandb/run-20260425_180609-3z5g26qd/run-3z5g26qd.wandb new file mode 100644 index 0000000000000000000000000000000000000000..3574a0c0ca19019be7e5fd337b032c822aba25f6 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_1e-5/wandb/run-20260425_180609-3z5g26qd/run-3z5g26qd.wandb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7835e82778925abb7d4264abf5972683a674726381777883ce3873a3942545f +size 1254273 diff --git a/lr_sweep/pythia_1b_lr_2e-5/model_best.pt b/lr_sweep/pythia_1b_lr_2e-5/model_best.pt new file mode 100644 index 0000000000000000000000000000000000000000..3d7eccac24054c46b5bd9b8d77e5a0e52adfcfa5 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_2e-5/model_best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc45757660f862a5c312fc10135b84f763b2dc46a1aed91a59fb63e8398282d6 +size 2023640386 diff --git a/lr_sweep/pythia_1b_lr_2e-5/model_final.pt b/lr_sweep/pythia_1b_lr_2e-5/model_final.pt new file mode 100644 index 0000000000000000000000000000000000000000..55366344e869414aaf43c5b3cd6823608f7f4739 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_2e-5/model_final.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bef5c67a4c7b04bddb30ad4aeeadf5699e41054e83dea25d346a75c8e9f1cd11 +size 2023640586 diff --git 
a/lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/run-bhvwo83l.wandb b/lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/run-bhvwo83l.wandb new file mode 100644 index 0000000000000000000000000000000000000000..468611e9144d0af5fa64f0f78d477133bf8093d3 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/run-bhvwo83l.wandb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f90a627a6c5da082ef95e0915b8f76d35709c371c1cc6948b1b506cfb95bc0c +size 1262550 diff --git a/lr_sweep/pythia_1b_lr_5e-5/.hydra/overrides.yaml b/lr_sweep/pythia_1b_lr_5e-5/.hydra/overrides.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b6ad4398691bd1ff159736803fed68852c2e0f25 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/.hydra/overrides.yaml @@ -0,0 +1,7 @@ +- tracking=wandb +- tracking.project=code-completion_lr-sweep +- tracking.run_name=pythia_1b_lr_5e-5 +- training.lr=5e-5 +- paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5 +- model=pythia_1b +- data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full diff --git a/lr_sweep/pythia_1b_lr_5e-5/model_best.pt b/lr_sweep/pythia_1b_lr_5e-5/model_best.pt new file mode 100644 index 0000000000000000000000000000000000000000..538c576034e082c285d0a06afca8e62a3d607091 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/model_best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:141e5557328c13b72ef63dfe87de104d73410061c810d9e1c37a0ab7216a8c6e +size 2023640386 diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/debug-internal.log b/lr_sweep/pythia_1b_lr_5e-5/wandb/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..a1276ba167adb0b0b26f4b43aa2552cc3d90da0b --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/debug-internal.log @@ -0,0 +1,13 @@ +{"time":"2026-04-25T19:30:46.037958475Z","level":"INFO","msg":"stream: starting","core version":"0.24.0"} +{"time":"2026-04-25T19:30:46.57692305Z","level":"INFO","msg":"stream: created new stream","id":"vg3if73m"} +{"time":"2026-04-25T19:30:46.576990227Z","level":"INFO","msg":"handler: started","stream_id":"vg3if73m"} +{"time":"2026-04-25T19:30:46.577113222Z","level":"INFO","msg":"stream: started","id":"vg3if73m"} +{"time":"2026-04-25T19:30:46.577131984Z","level":"INFO","msg":"sender: started","stream_id":"vg3if73m"} +{"time":"2026-04-25T19:30:46.577130838Z","level":"INFO","msg":"writer: started","stream_id":"vg3if73m"} +{"time":"2026-04-25T19:30:46.758521679Z","level":"ERROR","msg":"git repo not found","error":"repository does not exist"} +{"time":"2026-04-25T20:13:21.644248736Z","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"} +{"time":"2026-04-25T20:13:21.756609599Z","level":"INFO","msg":"handler: operation stats","stats":{}} +{"time":"2026-04-25T20:13:21.759436838Z","level":"INFO","msg":"stream: closing","id":"vg3if73m"} +{"time":"2026-04-25T20:13:21.759447359Z","level":"INFO","msg":"handler: closed","stream_id":"vg3if73m"} +{"time":"2026-04-25T20:13:21.759544665Z","level":"INFO","msg":"sender: closed","stream_id":"vg3if73m"} +{"time":"2026-04-25T20:13:21.759549885Z","level":"INFO","msg":"stream: closed","id":"vg3if73m"} diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/debug.log b/lr_sweep/pythia_1b_lr_5e-5/wandb/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..0491c6a67e1b9682511602a186734db8ab3a1eaa --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/debug.log @@ -0,0 +1,24 @@ +2026-04-25 
19:30:45,740 INFO MainThread:108122 [wandb_setup.py:_flush():81] Current SDK version is 0.24.0 +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_setup.py:_flush():81] Configure stats pid to 108122 +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_setup.py:_flush():81] Loading settings from environment variables +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_init.py:setup_run_log_directory():717] Logging user logs to /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/logs/debug.log +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_init.py:setup_run_log_directory():718] Logging internal logs to /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/logs/debug-internal.log +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_init.py:init():844] calling init triggers +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_init.py:init():849] wandb.init called with sweep_config: {} +config: {'model': {'name': 'EleutherAI/pythia-1b', 'checkpoint_path': None, 'from_scratch': False}, 'training': {'epochs': 1, 'batch_size': 4, 'eval_batch_size': 12, 'gradient_accumulation_steps': 4, 'lr': 5e-05, 'weight_decay': 0.1, 'betas': [0.9, 0.95], 'eps': 1e-08, 'lr_scheduler': 'wsd', 'warmup_ratio': 0.1, 'decay_ratio': 0.2, 'warmup_steps': 100, 'min_lr_ratio': 0.1, 'max_grad_norm': 1.0, 'use_amp': True, 'resume': False, 'resume_checkpoint': None}, 'data': {'path': '/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full', 'max_context_len': 4096, 'max_target_len': 256, 'num_workers': 4, 'pin_memory': True, 'max_train_samples': None, 'max_val_samples': 2000}, 'logging': {'log_interval': 10, 'save_interval': 0, 'eval_interval': 2000, 'save_every_epoch': False}, 'tracking': {'enabled': True, 'backend': 'wandb', 'project': 'code-completion_lr-sweep', 'run_name': 'pythia_1b_lr_5e-5', 'entity': None, 'base_url': 'https://wandb.platun0v.ru', 'local_dir': '/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5'}, 'paths': {'output_dir': '/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5'}, 'seed': 42, 'device': 'cuda', '_wandb': {'code_path': 'code/code_completion_exp/train_pythia/train.py'}} +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_init.py:init():892] starting backend +2026-04-25 19:30:46,013 INFO MainThread:108122 [wandb_init.py:init():895] sending inform_init request +2026-04-25 19:30:46,036 INFO MainThread:108122 [wandb_init.py:init():903] backend started and connected +2026-04-25 19:30:46,039 INFO MainThread:108122 [wandb_init.py:init():973] updated telemetry +2026-04-25 19:30:46,057 INFO MainThread:108122 [wandb_init.py:init():997] communicating run to backend with 90.0 second timeout +2026-04-25 19:30:46,757 INFO MainThread:108122 [wandb_init.py:init():1044] starting run threads in backend +2026-04-25 19:30:46,918 INFO MainThread:108122 [wandb_run.py:_console_start():2529] atexit reg +2026-04-25 19:30:46,918 INFO MainThread:108122 [wandb_run.py:_redirect():2377] redirect: wrap_raw +2026-04-25 19:30:46,918 INFO MainThread:108122 [wandb_run.py:_redirect():2446] Wrapping output streams. +2026-04-25 19:30:46,918 INFO MainThread:108122 [wandb_run.py:_redirect():2469] Redirects installed. 
+2026-04-25 19:30:46,921 INFO MainThread:108122 [wandb_init.py:init():1084] run started, returning control to user process
+2026-04-25 20:13:20,732 INFO MainThread:108122 [wandb_run.py:_finish():2295] finishing run nikita/code-completion_lr-sweep/vg3if73m
+2026-04-25 20:13:20,733 INFO MainThread:108122 [wandb_run.py:_atexit_cleanup():2494] got exitcode: 0
+2026-04-25 20:13:20,733 INFO MainThread:108122 [wandb_run.py:_restore():2476] restore
+2026-04-25 20:13:20,733 INFO MainThread:108122 [wandb_run.py:_restore():2482] restore done
+2026-04-25 20:13:21,758 INFO MainThread:108122 [wandb_run.py:_footer_sync_info():3870] logging synced files
diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/code/code_completion_exp/train_pythia/train.py b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/code/code_completion_exp/train_pythia/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4739962b19b1d61085c8b55220470866db8aea1
--- /dev/null
+++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/code/code_completion_exp/train_pythia/train.py
@@ -0,0 +1,606 @@
+"""
+Training pipeline for Pythia (decoder-only transformer) on the code completion task.
+
+Configuration via Hydra + OmegaConf, logging to Trackio.
+DDP support via Accelerate for multi-GPU training.
+
+Usage:
+    # Basic run (single GPU)
+    python train.py
+
+    # Multi-GPU with Accelerate
+    accelerate launch train.py
+
+    # Multi-GPU with an explicit number of GPUs
+    accelerate launch --num_processes=4 train.py
+
+    # Override parameters via the CLI
+    python train.py training.lr=1e-4 training.epochs=5
+
+    # Select a different model config
+    python train.py model=pythia_160m
+
+    # Multirun (sweep)
+    python train.py --multirun training.lr=1e-4,3e-4,1e-3
+
+    # Disable logging
+    python train.py tracking.enabled=false
+"""
+
+import os
+import math
+import time
+from pathlib import Path
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import DataLoader
+from datasets import load_from_disk
+
+import hydra
+from hydra.core.hydra_config import HydraConfig
+from omegaconf import DictConfig, OmegaConf
+from transformers import (
+    AutoTokenizer,
+    AutoModelForCausalLM,
+    AutoConfig,
+    PreTrainedTokenizerBase,
+)
+from accelerate import Accelerator
+from accelerate.utils import set_seed as accelerate_set_seed
+
+# Ensure repo root is on sys.path (needed when running from subdirectory)
+import sys
+sys.path.insert(0, str(Path(__file__).resolve().parents[2]))
+
+# Shared training library
+from training_lib.utils import AverageMeter, log_message
+from training_lib.checkpointing import save_checkpoint, load_checkpoint
+from training_lib.schedulers import get_lr_scheduler
+from training_lib.tracking import init_tracking, log_metrics, finish_tracking
+from training_lib.validation import run_validation
+
+
+# ============================================================================
+# DATA
+# ============================================================================
+
+
+class CodeCompletionCollator:
+    """Collate function for batching code completion examples."""
+
+    def __init__(
+        self,
+        tokenizer: PreTrainedTokenizerBase,
+        max_context_len: int = 1024,
+        max_target_len: int = 256,
+    ):
+        self.tokenizer = tokenizer
+        self.max_context_len = max_context_len
+        self.max_target_len = max_target_len
+        self.pad_token_id = tokenizer.pad_token_id
+
+    def __call__(self, batch: list[dict]) -> dict:
+        contexts = [item["context"] for item in batch]
+        targets = [item["target"] for item in batch]
+
+        encoded_contexts = self.tokenizer(
+            contexts,
+            add_special_tokens=True,
+            truncation=True,
+            max_length=self.max_context_len,
+            return_tensors=None,
+        )
+        encoded_targets = self.tokenizer(
+            targets,
+            add_special_tokens=False,
+            truncation=True,
+            max_length=self.max_target_len,
+            return_tensors=None,
+        )
+
+        input_ids_list = []
+        context_lengths = []
+
+        for ctx_ids, tgt_ids in zip(
+            encoded_contexts["input_ids"], encoded_targets["input_ids"]
+        ):
+            tgt_ids = tgt_ids + [self.tokenizer.eos_token_id]
+            context_lengths.append(len(ctx_ids))
+            input_ids_list.append(ctx_ids + tgt_ids)
+
+        max_len = max(len(ids) for ids in input_ids_list)
+
+        padded_input_ids = []
+        attention_mask = []
+
+        for ids in input_ids_list:
+            padding_len = max_len - len(ids)
+            padded_input_ids.append(ids + [self.pad_token_id] * padding_len)
+            attention_mask.append([1] * len(ids) + [0] * padding_len)
+
+        return {
+            "input_ids": torch.tensor(padded_input_ids, dtype=torch.long),
+            "attention_mask": torch.tensor(attention_mask, dtype=torch.long),
+            "context_lengths": torch.tensor(context_lengths, dtype=torch.long),
+        }
+
+
+def create_dataloaders(
+    cfg: DictConfig, tokenizer: PreTrainedTokenizerBase
+) -> dict[str, DataLoader]:
+    """Create DataLoaders for train and validation."""
+    dataset_dict = load_from_disk(cfg.data.path)
+
+    collator = CodeCompletionCollator(
+        tokenizer=tokenizer,
+        max_context_len=cfg.data.max_context_len,
+        max_target_len=cfg.data.max_target_len,
+    )
+
+    dataloaders = {}
+
+    if "train" in dataset_dict:
+        train_dataset = dataset_dict["train"]
+        max_train = cfg.data.get("max_train_samples", None)
+        if max_train is not None:
+            train_dataset = train_dataset.select(range(min(max_train, len(train_dataset))))
+        dataloaders["train"] = DataLoader(
+            train_dataset,
+            batch_size=cfg.training.batch_size,
+            shuffle=True,
+            collate_fn=collator,
+            num_workers=cfg.data.num_workers,
+            pin_memory=cfg.data.pin_memory,
+        )
+
+    if "validation" in dataset_dict:
+        val_dataset = dataset_dict["validation"]
+        max_val = cfg.data.get("max_val_samples", None)
+        if max_val is not None:
+            val_dataset = val_dataset.select(range(min(max_val, len(val_dataset))))
+        eval_batch_size = cfg.training.get("eval_batch_size", cfg.training.batch_size)
+        dataloaders["validation"] = DataLoader(
+            val_dataset,
+            batch_size=eval_batch_size,
+            shuffle=False,
+            collate_fn=collator,
+            num_workers=cfg.data.num_workers,
+            pin_memory=cfg.data.pin_memory,
+        )
+
+    return dataloaders
+
+
+# ============================================================================
+# LOSS FUNCTIONS
+# ============================================================================
+
+
+def compute_loss(
+    logits: torch.Tensor,
+    input_ids: torch.Tensor,
+    context_lengths: torch.Tensor,
+    attention_mask: torch.Tensor,
+) -> dict:
+    """Compute the loss for an autoregressive model."""
+    batch_size, seq_len, vocab_size = logits.shape
+
+    shift_logits = logits[:, :-1, :].contiguous()
+    shift_labels = input_ids[:, 1:].contiguous()
+    shift_mask = attention_mask[:, 1:].contiguous()
+
+    target_mask = torch.zeros_like(shift_labels, dtype=torch.bool)
+    for i in range(batch_size):
+        ctx_len = context_lengths[i].item()
+        target_mask[i, ctx_len - 1 :] = True
+
+    final_mask = target_mask & shift_mask.bool()
+
+    if final_mask.sum() > 0:
+        loss = F.cross_entropy(
+            shift_logits[final_mask], shift_labels[final_mask], reduction="mean"
+        )
+    else:
+        loss = torch.tensor(0.0, device=logits.device)
return {"loss": loss} + + +def _pythia_forward_loss( + model: nn.Module, + batch: dict, + cfg: DictConfig, + accelerator: Accelerator, +) -> dict: + """Forward + loss for a plain HF causal LM (attention_mask= kwarg, .logits).""" + input_ids = batch["input_ids"] + attention_mask = batch["attention_mask"] + context_lengths = batch["context_lengths"] + output = model(input_ids, attention_mask=attention_mask) + return compute_loss(output.logits, input_ids, context_lengths, attention_mask) + + +# ============================================================================ +# PARAMETER GROUPING +# ============================================================================ + + +def group_params(model: nn.Module, weight_decay: float) -> list[dict]: + """Группировка параметров для optimizer.""" + decay_params = [] + no_decay_params = [] + + for name, param in model.named_parameters(): + if not param.requires_grad: + continue + + if "bias" in name or "LayerNorm" in name or "layernorm" in name: + no_decay_params.append(param) + else: + decay_params.append(param) + + return [ + {"params": decay_params, "weight_decay": weight_decay}, + {"params": no_decay_params, "weight_decay": 0.0}, + ] + + + + +# ============================================================================ +# TRAINING LOOP +# ============================================================================ + + +def train_epoch( + model: nn.Module, + dataloader: DataLoader, + optimizer: torch.optim.Optimizer, + scheduler, + cfg: DictConfig, + epoch: int, + global_step: int, + accelerator: Accelerator, + val_dataloader: DataLoader | None = None, + best_val_loss: float = float("inf"), +) -> tuple[int, float]: + """Один epoch тренировки. Возвращает (global_step, best_val_loss).""" + model.train() + + loss_meter = AverageMeter() + + optimizer.zero_grad() + accumulated_loss = 0.0 + accumulated_steps = 0 + + epoch_start_time = time.time() + step_start_time = time.time() + + for batch_idx, batch in enumerate(dataloader): + input_ids = batch["input_ids"] + attention_mask = batch["attention_mask"] + context_lengths = batch["context_lengths"] + + with accelerator.autocast(): + output = model(input_ids, attention_mask=attention_mask) + logits = output.logits + loss_dict = compute_loss( + logits, input_ids, context_lengths, attention_mask + ) + + loss = loss_dict["loss"] / cfg.training.gradient_accumulation_steps + accelerator.backward(loss) + + accumulated_loss += loss_dict["loss"].item() + accumulated_steps += 1 + + if accumulated_steps == cfg.training.gradient_accumulation_steps: + if cfg.training.max_grad_norm > 0: + accelerator.clip_grad_norm_( + model.parameters(), cfg.training.max_grad_norm + ) + + optimizer.step() + scheduler.step() + optimizer.zero_grad() + + avg_loss = accumulated_loss / cfg.training.gradient_accumulation_steps + loss_meter.update(avg_loss) + + global_step += 1 + + if global_step % cfg.logging.log_interval == 0: + step_time = time.time() - step_start_time + current_lr = scheduler.get_last_lr()[0] + + metrics = { + "train/loss": loss_meter.val, + "train/loss_avg": loss_meter.avg, + "train/lr": current_lr, + "train/epoch": epoch, + "train/step_time": step_time / cfg.logging.log_interval, + } + + log_metrics(metrics, step=global_step) + + log_message( + f"Epoch {epoch} | Step {global_step} | " + f"Loss: {loss_meter.avg:.4f} | " + f"LR: {current_lr:.2e}", + cfg, + accelerator, + ) + + step_start_time = time.time() + + if ( + cfg.logging.save_interval > 0 + and global_step % cfg.logging.save_interval == 0 + ): + save_checkpoint( 
+ model, optimizer, scheduler, global_step, epoch, cfg, accelerator + ) + + eval_interval = cfg.logging.get("eval_interval", 0) + if ( + eval_interval > 0 + and val_dataloader is not None + and global_step % eval_interval == 0 + ): + val_metrics = run_validation( + model=model, + dataloader=val_dataloader, + cfg=cfg, + global_step=global_step, + accelerator=accelerator, + forward_loss_fn=_pythia_forward_loss, + ) + + if val_metrics["val/loss"] < best_val_loss: + best_val_loss = val_metrics["val/loss"] + if accelerator.is_main_process: + best_model_path = Path(cfg.paths.output_dir) / "model_best.pt" + unwrapped_model = accelerator.unwrap_model(model) + torch.save(unwrapped_model.state_dict(), best_model_path) + log_message( + f"New best model saved! Val loss: {best_val_loss:.4f}", + cfg, + accelerator + ) + + log_metrics( + { + "best/val_loss": best_val_loss, + "best/val_perplexity": val_metrics["val/perplexity"], + "best/step": global_step, + }, + step=global_step, + ) + + model.train() + + accumulated_loss = 0.0 + accumulated_steps = 0 + + epoch_time = time.time() - epoch_start_time + + log_message( + f"Epoch {epoch} completed in {epoch_time:.2f}s | " + f"Loss: {loss_meter.avg:.4f}", + cfg, + accelerator, + ) + + log_metrics({ + "epoch/loss": loss_meter.avg, + "epoch/time": epoch_time, + }) + + return global_step, best_val_loss + + +# ============================================================================ +# MAIN +# ============================================================================ + + +@hydra.main(version_base=None, config_path="configs", config_name="config") +def main(cfg: DictConfig): + """Главная функция тренировки с поддержкой DDP через Accelerate.""" + + # === Performance: Enable TF32 for faster matmuls on Ampere+ GPUs === + torch.set_float32_matmul_precision('high') + + # === Accelerator Setup === + mixed_precision = "bf16" if cfg.training.use_amp else "no" + + accelerator = Accelerator( + mixed_precision=mixed_precision, + gradient_accumulation_steps=cfg.training.gradient_accumulation_steps, + ) + + # === Setup === + accelerate_set_seed(cfg.seed) + + if cfg.paths.output_dir is None: + cfg.paths.output_dir = HydraConfig.get().runtime.output_dir + + OmegaConf.resolve(cfg) + + log_message(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES', 'not set')}", cfg, accelerator) + log_message(f"Number of processes: {accelerator.num_processes}", cfg, accelerator) + log_message(f"Process index: {accelerator.process_index}", cfg, accelerator) + log_message(f"Mixed precision: {mixed_precision}", cfg, accelerator) + + log_message("=" * 60, cfg, accelerator) + log_message("Pythia Training Pipeline (Hydra + Trackio + Accelerate)", cfg, accelerator) + log_message("=" * 60, cfg, accelerator) + log_message(f"Config:\n{OmegaConf.to_yaml(cfg)}", cfg, accelerator) + + # === Trackio Init === + init_tracking(cfg, accelerator) + + # === Tokenizer === + log_message("Initializing tokenizer...", cfg, accelerator) + tokenizer = AutoTokenizer.from_pretrained(cfg.model.name) + + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + tokenizer.pad_token_id = tokenizer.eos_token_id + + # === Model === + log_message("Loading model...", cfg, accelerator) + + # Flash Attention 2 + torch_dtype = torch.bfloat16 if cfg.training.use_amp else torch.float32 + + if cfg.model.checkpoint_path: + model = AutoModelForCausalLM.from_pretrained( + cfg.model.name, + attn_implementation="flash_attention_2", + torch_dtype=torch_dtype, + ) + checkpoint = 
torch.load(cfg.model.checkpoint_path, map_location="cpu") + model.load_state_dict(checkpoint["model_state_dict"] if "model_state_dict" in checkpoint else checkpoint) + log_message(f"Loaded checkpoint: {cfg.model.checkpoint_path}", cfg, accelerator) + elif cfg.model.from_scratch: + config = AutoConfig.from_pretrained(cfg.model.name) + config._attn_implementation = "flash_attention_2" + model = AutoModelForCausalLM.from_config(config, torch_dtype=torch_dtype) + log_message(f"Initialized from scratch: {cfg.model.name}", cfg, accelerator) + else: + model = AutoModelForCausalLM.from_pretrained( + cfg.model.name, + attn_implementation="flash_attention_2", + torch_dtype=torch_dtype, + ) + log_message(f"Loaded pretrained: {cfg.model.name}", cfg, accelerator) + + model.train() + + # Log model info + total_params = sum(p.numel() for p in model.parameters()) + trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) + log_message(f"Total params: {total_params:,}", cfg, accelerator) + log_message(f"Trainable params: {trainable_params:,}", cfg, accelerator) + + # === Data === + log_message("Creating dataloaders...", cfg, accelerator) + dataloaders = create_dataloaders(cfg, tokenizer) + + train_dataloader = dataloaders["train"] + val_dataloader = dataloaders.get("validation", None) + + log_message(f"Train dataset size: {len(train_dataloader.dataset)}", cfg, accelerator) + log_message(f"Train batches per epoch (before DDP split): {len(train_dataloader)}", cfg, accelerator) + + if val_dataloader: + log_message(f"Validation dataset size: {len(val_dataloader.dataset)}", cfg, accelerator) + log_message(f"Validation batches: {len(val_dataloader)}", cfg, accelerator) + else: + log_message("No validation dataset found", cfg, accelerator) + + # === Optimizer === + log_message("Creating optimizer...", cfg, accelerator) + param_groups = group_params(model, cfg.training.weight_decay) + + optimizer = torch.optim.AdamW( + param_groups, + lr=cfg.training.lr, + betas=tuple(cfg.training.betas), + eps=cfg.training.eps, + ) + + # === Scheduler === + steps_per_epoch = math.ceil( + len(train_dataloader) / accelerator.num_processes + ) + total_steps = ( + cfg.training.epochs + * steps_per_epoch + // cfg.training.gradient_accumulation_steps + ) + scheduler = get_lr_scheduler(optimizer, cfg, total_steps) + + log_message( + f"Total steps: {total_steps}, Steps per epoch: {steps_per_epoch}", + cfg, + accelerator + ) + + # === Accelerate Prepare === + log_message("Preparing model, optimizer, and dataloaders with Accelerate...", cfg, accelerator) + + if val_dataloader is not None: + model, optimizer, train_dataloader, val_dataloader, scheduler = accelerator.prepare( + model, optimizer, train_dataloader, val_dataloader, scheduler + ) + else: + model, optimizer, train_dataloader, scheduler = accelerator.prepare( + model, optimizer, train_dataloader, scheduler + ) + + log_message(f"Train batches per epoch (after DDP split): {len(train_dataloader)}", cfg, accelerator) + + # === Resume === + global_step = 0 + start_epoch = 1 + + if cfg.training.resume and cfg.training.resume_checkpoint: + global_step, start_epoch = load_checkpoint( + model, optimizer, scheduler, cfg.training.resume_checkpoint, cfg, accelerator + ) + start_epoch += 1 + + # === Training Loop === + log_message("Starting training...", cfg, accelerator) + + best_val_loss = float("inf") + + try: + for epoch in range(start_epoch, cfg.training.epochs + 1): + log_message(f"\n{'=' * 60}", cfg, accelerator) + log_message(f"EPOCH 
{epoch}/{cfg.training.epochs}", cfg, accelerator) + log_message(f"{'=' * 60}", cfg, accelerator) + + global_step, best_val_loss = train_epoch( + model=model, + dataloader=train_dataloader, + optimizer=optimizer, + scheduler=scheduler, + cfg=cfg, + epoch=epoch, + global_step=global_step, + accelerator=accelerator, + val_dataloader=val_dataloader, + best_val_loss=best_val_loss, + ) + + if cfg.logging.save_every_epoch: + save_checkpoint( + model, optimizer, scheduler, global_step, epoch, cfg, accelerator + ) + + except KeyboardInterrupt: + log_message("Training interrupted by user", cfg, accelerator) + save_checkpoint(model, optimizer, scheduler, global_step, epoch, cfg, accelerator) + + # === Final Save === + log_message("\nTraining completed!", cfg, accelerator) + + if accelerator.is_main_process: + final_model_path = Path(cfg.paths.output_dir) / "model_final.pt" + unwrapped_model = accelerator.unwrap_model(model) + torch.save(unwrapped_model.state_dict(), final_model_path) + log_message(f"Final model: {final_model_path}", cfg, accelerator) + + accelerator.wait_for_everyone() + finish_tracking() + + +if __name__ == "__main__": + main() diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/config.yaml b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1d4485b456bf4caa6fd613d61d5fc3b165b266d8 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/config.yaml @@ -0,0 +1,146 @@ +_wandb: + value: + cli_version: 0.24.0 + code_path: code/code_completion_exp/train_pythia/train.py + e: + 7tgf82pk1kp15jun4e833mn85qzvctn8: + args: + - tracking=wandb + - tracking.project=code-completion_lr-sweep + - tracking.run_name=pythia_1b_lr_5e-5 + - training.lr=5e-5 + - paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5 + - model=pythia_1b + - data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full + codePath: code_completion_exp/train_pythia/train.py + codePathLocal: train.py + cpu_count: 64 + cpu_count_logical: 128 + cudaVersion: "12.2" + disk: + /: + total: "265214230528" + used: "70239207424" + email: nikita@local.ru + executable: /venv/bytellm/bin/python + git: + commit: f111e13281aa0dc58e24302edab5b0d5c2024586 + remote: https://github.com/naryst/byte-llms-code.git + gpu: NVIDIA H100 80GB HBM3 + gpu_count: 4 + gpu_nvidia: + - architecture: Hopper + cudaCores: 16896 + memoryTotal: "85520809984" + name: NVIDIA H100 80GB HBM3 + uuid: GPU-b60cdcab-2033-2009-41de-be646c953a20 + - architecture: Hopper + cudaCores: 16896 + memoryTotal: "85520809984" + name: NVIDIA H100 80GB HBM3 + uuid: GPU-9982b420-4520-4238-c378-ec5a46015474 + - architecture: Hopper + cudaCores: 16896 + memoryTotal: "85520809984" + name: NVIDIA H100 80GB HBM3 + uuid: GPU-e26ebaac-aaa6-3eed-17ab-a3dce303a76f + - architecture: Hopper + cudaCores: 16896 + memoryTotal: "85520809984" + name: NVIDIA H100 80GB HBM3 + uuid: GPU-9dfc6dba-0be6-4a10-1027-336cc0e65134 + host: 7504e518d24a + memory: + total: "1081679683584" + os: Linux-5.4.0-176-generic-x86_64-with-glibc2.35 + program: /workspace/byte-llms-code/code_completion_exp/train_pythia/train.py + python: CPython 3.12.0 + root: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5 + startedAt: "2026-04-25T17:57:54.046461Z" + writerId: 7tgf82pk1kp15jun4e833mn85qzvctn8 + m: [] + python_version: 3.12.0 + t: + "1": + - 1 + - 11 + - 49 + - 50 + - 51 + - 71 + - 105 + "2": + - 1 + - 11 + - 49 + - 
50 + - 51 + - 71 + - 105 + "3": + - 2 + - 13 + - 16 + - 61 + "4": 3.12.0 + "5": 0.24.0 + "6": 4.57.6 + "12": 0.24.0 + "13": linux-x86_64 +data: + value: + max_context_len: 4096 + max_target_len: 256 + max_train_samples: 20000 + max_val_samples: 2000 + num_workers: 4 + path: /workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full + pin_memory: true +device: + value: cuda +logging: + value: + eval_interval: 1000 + log_interval: 10 + save_every_epoch: true + save_interval: 3000 +model: + value: + checkpoint_path: null + from_scratch: false + name: EleutherAI/pythia-1b +paths: + value: + output_dir: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5 +seed: + value: 42 +tracking: + value: + backend: wandb + base_url: https://wandb.platun0v.ru + enabled: true + entity: null + local_dir: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5 + project: code-completion_lr-sweep + run_name: pythia_1b_lr_5e-5 +training: + value: + batch_size: 4 + betas: + - 0.9 + - 0.95 + decay_ratio: 0.2 + epochs: 1 + eps: 1e-08 + eval_batch_size: 12 + gradient_accumulation_steps: 4 + lr: 5e-05 + lr_scheduler: wsd + max_grad_norm: 1 + min_lr_ratio: 0.1 + resume: false + resume_checkpoint: null + use_amp: true + warmup_ratio: 0.1 + warmup_steps: 100 + weight_decay: 0.1 diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/output.log b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..765f125fe7efe932c1f12f1b03129fce9d504e06 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/output.log @@ -0,0 +1,29 @@ +[2026-04-25 17:57:54] Initializing tokenizer... +[2026-04-25 17:57:55] Loading model... +`torch_dtype` is deprecated! Use `dtype` instead! +[2026-04-25 17:57:58] Loaded pretrained: EleutherAI/pythia-1b +[2026-04-25 17:57:58] Total params: 1,011,781,632 +[2026-04-25 17:57:58] Trainable params: 1,011,781,632 +[2026-04-25 17:57:58] Creating dataloaders... +[2026-04-25 17:57:58] Train dataset size: 20000 +[2026-04-25 17:57:58] Train batches per epoch (before DDP split): 5000 +[2026-04-25 17:57:58] Validation dataset size: 2000 +[2026-04-25 17:57:58] Validation batches: 167 +[2026-04-25 17:57:58] Creating optimizer... +[2026-04-25 17:57:58] Total steps: 625, Steps per epoch: 2500 +[2026-04-25 17:57:58] Preparing model, optimizer, and dataloaders with Accelerate... +[2026-04-25 17:58:00] Train batches per epoch (after DDP split): 2500 +[2026-04-25 17:58:00] Starting training... +[2026-04-25 17:58:00] +============================================================ +[2026-04-25 17:58:00] EPOCH 1/1 +[2026-04-25 17:58:00] ============================================================ +[2026-04-25 17:58:03] Epoch 1 | Step 10 | Loss: 1.6976 | LR: 1.95e-05 +[2026-04-25 17:58:06] Epoch 1 | Step 20 | Loss: 1.3818 | LR: 3.40e-05 +[2026-04-25 17:58:09] Epoch 1 | Step 30 | Loss: 1.2980 | LR: 4.85e-05 +[2026-04-25 17:58:11] Epoch 1 | Step 40 | Loss: 1.2758 | LR: 5.00e-05 +[2026-04-25 17:58:12] Training interrupted by user +[2026-04-25 17:58:19] Checkpoint saved: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5/checkpoints/checkpoint_step_0.pt +[2026-04-25 17:58:25] +Training completed! 
+[2026-04-25 17:58:27] Final model: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5/model_final.pt diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/requirements.txt b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f040f697230340f8a88a6e7387f7e8983d11b547 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/requirements.txt @@ -0,0 +1,245 @@ +setuptools==78.1.1 +wheel==0.45.1 +pip==25.2 +webencodings==0.5.1 +triton==3.2.0 +pytz==2025.2 +pydub==0.25.1 +pure_eval==0.2.3 +ptyprocess==0.7.0 +nvidia-ml-py==13.590.48 +nvidia-cusparselt-cu12==0.6.2 +mpmath==1.3.0 +ipython-genutils==0.2.0 +fastjsonschema==2.21.2 +brotli==1.2.0 +antlr4-python3-runtime==4.9.3 +xxhash==3.6.0 +widgetsnbextension==4.0.14 +websocket-client==1.9.0 +webcolors==24.11.1 +wcwidth==0.2.14 +urllib3==2.5.0 +uri-template==1.3.0 +tzdata==2025.2 +typing_extensions==4.15.0 +types-python-dateutil==2.9.0.20251008 +traitlets==5.14.3 +tqdm==4.67.1 +tornado==6.5.2 +tomlkit==0.13.3 +tinycss2==1.4.0 +tabulate==0.9.0 +sympy==1.13.1 +soupsieve==2.8 +sniffio==1.3.1 +smmap==5.0.2 +six==1.17.0 +shellingham==1.5.4 +Send2Trash==1.8.3 +semantic-version==2.10.0 +safetensors==0.6.2 +rpds-py==0.27.1 +rfc3986-validator==0.1.1 +regex==2025.9.18 +pyzmq==27.1.0 +PyYAML==6.0.3 +python-multipart==0.0.22 +python-json-logger==4.0.0 +python-dotenv==1.2.1 +pyparsing==3.2.5 +PyJWT==2.8.0 +Pygments==2.19.2 +pycparser==2.23 +pyarrow==22.0.0 +psutil==7.1.0 +protobuf==6.33.4 +propcache==0.4.1 +prometheus_client==0.23.1 +portalocker==3.2.0 +platformdirs==4.5.0 +pillow==11.3.0 +pexpect==4.9.0 +pathspec==1.0.4 +parso==0.8.5 +pandocfilters==1.5.1 +packaging==25.0 +orjson==3.11.6 +opt_einsum==3.4.0 +nvidia-nvtx-cu12==12.4.127 +nvidia-nvjitlink-cu12==12.4.127 +nvidia-nccl-cu12==2.21.5 +nvidia-curand-cu12==10.3.5.147 +nvidia-cufile-cu12==1.13.1.3 +nvidia-cufft-cu12==11.2.1.3 +nvidia-cuda-runtime-cu12==12.4.127 +nvidia-cuda-nvrtc-cu12==12.4.127 +nvidia-cuda-cupti-cu12==12.4.127 +nvidia-cublas-cu12==12.4.5.8 +numpy==2.3.3 +ninja==1.13.0 +networkx==3.5 +nest-asyncio==1.6.0 +narwhals==2.15.0 +mypy_extensions==1.1.0 +multidict==6.7.0 +mistune==3.1.4 +mdurl==0.1.2 +MarkupSafe==3.0.3 +lxml==6.0.2 +librt==0.8.0 +lark==1.3.0 +kiwisolver==1.4.9 +jupyterlab_widgets==3.0.15 +jupyterlab_pygments==0.3.0 +jsonpointer==3.0.0 +json5==0.12.1 +itsdangerous==2.2.0 +idna==3.10 +hf-xet==1.1.10 +h11==0.16.0 +groovy==0.1.2 +fsspec==2025.9.0 +frozenlist==1.8.0 +fqdn==1.5.1 +fonttools==4.60.1 +filelock==3.19.1 +ffmpy==1.0.0 +executing==2.2.1 +einops==0.8.1 +dill==0.4.0 +defusedxml==0.7.1 +decorator==5.2.1 +debugpy==1.8.17 +dacite==1.9.2 +cycler==0.12.1 +comm==0.2.3 +colorama==0.4.6 +click==8.3.1 +charset-normalizer==3.4.3 +certifi==2025.10.5 +bleach==6.2.0 +babel==2.17.0 +attrs==25.4.0 +async-lru==2.0.5 +asttokens==3.0.0 +annotated-types==0.7.0 +annotated-doc==0.0.4 +aiohappyeyeballs==2.6.1 +aiofiles==24.1.0 +yarl==1.22.0 +uvicorn==0.40.0 +typing-inspection==0.4.2 +terminado==0.18.1 +stack-data==0.6.3 +sentry-sdk==2.50.0 +scipy==1.17.0 +sacrebleu==2.6.0 +rfc3987-syntax==1.1.0 +rfc3339-validator==0.1.4 +requests==2.32.5 +reportlab==4.4.9 +referencing==0.36.2 +python-dateutil==2.9.0.post0 +pydantic_core==2.41.5 +prompt_toolkit==3.0.52 +plotly==6.5.2 +pathlib2==2.3.7.post1 +orderedmultidict==1.0.2 +optree==0.17.0 +omegaconf==2.3.0 +nvidia-cusparse-cu12==12.3.1.170 +nvidia-cudnn-cu12==9.1.0.70 +mypy==1.19.1 
+multiprocess==0.70.16 +matplotlib-inline==0.1.7 +markdown-it-py==4.0.0 +jupyter_core==5.8.1 +Jinja2==3.1.6 +jedi==0.19.2 +ipython_pygments_lexers==1.1.1 +httpcore==1.0.9 +gitdb==4.0.12 +ftfy==6.3.1 +contourpy==1.3.3 +cffi==2.0.0 +beautifulsoup4==4.14.2 +anyio==4.11.0 +aiosignal==1.4.0 +starlette==0.50.0 +rich==14.2.0 +pydantic==2.12.5 +pandas==2.3.3 +nvidia-cusolver-cu12==11.6.1.9 +matplotlib==3.10.7 +jupyter_server_terminals==0.5.3 +jupyter_client==8.6.3 +jsonschema-specifications==2025.9.1 +ipython==9.6.0 +hydra-core==1.3.2 +huggingface-hub==0.35.3 +httpx==0.28.1 +GitPython==3.1.46 +furl==2.1.4 +cryptography==46.0.4 +arrow==1.3.0 +argon2-cffi-bindings==25.1.0 +aiohttp==3.13.1 +wandb==0.24.0 +typer==0.21.1 +torch==2.6.0 +tokenizers==0.22.1 +seaborn==0.13.2 +safehttpx==0.1.7 +jsonschema==4.25.1 +joypy==0.2.6 +isoduration==20.11.0 +ipywidgets==8.1.7 +ipykernel==6.30.1 +gradio_client==2.0.3 +fastapi==0.128.0 +Authlib==1.6.6 +argon2-cffi==25.1.0 +transformers==4.57.6 +nbformat==5.10.4 +mlstm_kernels==2.0.2 +jupyter-console==6.6.3 +gradio==6.5.1 +datasets==4.3.0 +clearml==1.16.4 +accelerate==1.10.1 +xlstm==2.0.4 +nbclient==0.10.2 +jupyter-events==0.12.0 +trackio==0.15.0 +nbconvert==7.16.6 +jupyter_server==2.17.0 +notebook_shim==0.2.4 +jupyterlab_server==2.27.3 +jupyter-lsp==2.3.0 +nbclassic==1.3.3 +jupyterlab==4.4.9 +notebook==7.4.7 +jupyter_contrib_core==0.4.2 +jupyter==1.1.1 +jupyter_nbextensions_configurator==0.6.4 +causal-conv1d==1.5.0.post8 +flash_attn==2.7.4.post1 +mamba-ssm==2.2.4 +hnet==0.0.1 +autocommand==2.2.2 +backports.tarfile==1.2.0 +importlib_metadata==8.0.0 +inflect==7.3.1 +jaraco.collections==5.1.0 +jaraco.context==5.3.0 +jaraco.functools==4.0.1 +jaraco.text==3.12.1 +more-itertools==10.3.0 +packaging==24.2 +platformdirs==4.2.2 +tomli==2.0.1 +typeguard==4.3.0 +typing_extensions==4.12.2 +wheel==0.45.1 +zipp==3.19.2 diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/wandb-metadata.json b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..86c933689fb5b76beb95a71a0facf6e6711b453c --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/wandb-metadata.json @@ -0,0 +1,70 @@ +{ + "os": "Linux-5.4.0-176-generic-x86_64-with-glibc2.35", + "python": "CPython 3.12.0", + "startedAt": "2026-04-25T17:57:54.046461Z", + "args": [ + "tracking=wandb", + "tracking.project=code-completion_lr-sweep", + "tracking.run_name=pythia_1b_lr_5e-5", + "training.lr=5e-5", + "paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5", + "model=pythia_1b", + "data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full" + ], + "program": "/workspace/byte-llms-code/code_completion_exp/train_pythia/train.py", + "codePath": "code_completion_exp/train_pythia/train.py", + "codePathLocal": "train.py", + "git": { + "remote": "https://github.com/naryst/byte-llms-code.git", + "commit": "f111e13281aa0dc58e24302edab5b0d5c2024586" + }, + "email": "nikita@local.ru", + "root": "/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5", + "host": "7504e518d24a", + "executable": "/venv/bytellm/bin/python", + "cpu_count": 64, + "cpu_count_logical": 128, + "gpu": "NVIDIA H100 80GB HBM3", + "gpu_count": 4, + "disk": { + "/": { + "total": "265214230528", + "used": "70239207424" + } + }, + "memory": { + "total": "1081679683584" + }, + "gpu_nvidia": [ + { + "name": "NVIDIA H100 80GB HBM3", + "memoryTotal": "85520809984", + 
"cudaCores": 16896, + "architecture": "Hopper", + "uuid": "GPU-b60cdcab-2033-2009-41de-be646c953a20" + }, + { + "name": "NVIDIA H100 80GB HBM3", + "memoryTotal": "85520809984", + "cudaCores": 16896, + "architecture": "Hopper", + "uuid": "GPU-9982b420-4520-4238-c378-ec5a46015474" + }, + { + "name": "NVIDIA H100 80GB HBM3", + "memoryTotal": "85520809984", + "cudaCores": 16896, + "architecture": "Hopper", + "uuid": "GPU-e26ebaac-aaa6-3eed-17ab-a3dce303a76f" + }, + { + "name": "NVIDIA H100 80GB HBM3", + "memoryTotal": "85520809984", + "cudaCores": 16896, + "architecture": "Hopper", + "uuid": "GPU-9dfc6dba-0be6-4a10-1027-336cc0e65134" + } + ], + "cudaVersion": "12.2", + "writerId": "7tgf82pk1kp15jun4e833mn85qzvctn8" +} \ No newline at end of file diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/wandb-summary.json b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..c547a4cb23e371bb7b5b353a78a880461c2b2619 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/files/wandb-summary.json @@ -0,0 +1 @@ +{"_runtime":32.849936075,"train/loss_avg":1.2758313393220306,"train/step_time":0.2658518314361572,"_step":40,"train/lr":5e-05,"_timestamp":1.7771398919973965e+09,"train/epoch":1,"_wandb":{"runtime":32},"train/loss":1.024767443537712} \ No newline at end of file diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/logs/debug-core.log b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/logs/debug-core.log new file mode 100644 index 0000000000000000000000000000000000000000..a55691c24906599551984f0324560adc8d8c5419 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/logs/debug-core.log @@ -0,0 +1,16 @@ +{"time":"2026-04-25T17:57:54.13033512Z","level":"INFO","msg":"main: starting server","port-filename":"/tmp/tmp2zeogffm/port-59233.txt","pid":59233,"log-level":0,"disable-analytics":false,"shutdown-on-parent-exit":false,"enable-dcgm-profiling":false} +{"time":"2026-04-25T17:57:54.130725446Z","level":"INFO","msg":"server: will exit if parent process dies","ppid":59233} +{"time":"2026-04-25T17:57:54.130720947Z","level":"INFO","msg":"server: accepting connections","addr":{"Name":"/tmp/wandb-59233-59299-20282455/socket","Net":"unix"}} +{"time":"2026-04-25T17:57:54.318389906Z","level":"INFO","msg":"connection: ManageConnectionData: new connection created","id":"1(@)"} +{"time":"2026-04-25T17:57:54.341747425Z","level":"INFO","msg":"handleInformInit: received","streamId":"xsz105vg","id":"1(@)"} +{"time":"2026-04-25T17:57:54.691929175Z","level":"INFO","msg":"handleInformInit: stream started","streamId":"xsz105vg","id":"1(@)"} +{"time":"2026-04-25T17:58:28.236141582Z","level":"INFO","msg":"handleInformFinish: finish message received","streamId":"xsz105vg","id":"1(@)"} +{"time":"2026-04-25T17:58:28.237178726Z","level":"INFO","msg":"handleInformFinish: stream closed","streamId":"xsz105vg","id":"1(@)"} +{"time":"2026-04-25T17:58:28.255233201Z","level":"INFO","msg":"handleInformTeardown: server teardown initiated","id":"1(@)"} +{"time":"2026-04-25T17:58:28.255256912Z","level":"INFO","msg":"handleInformTeardown: server shutdown complete","id":"1(@)"} +{"time":"2026-04-25T17:58:28.255262179Z","level":"INFO","msg":"server is shutting down"} +{"time":"2026-04-25T17:58:28.255263152Z","level":"INFO","msg":"connection: closing","id":"1(@)"} 
+{"time":"2026-04-25T17:58:28.25530879Z","level":"INFO","msg":"connection: closed successfully","id":"1(@)"} +{"time":"2026-04-25T17:58:28.255325633Z","level":"INFO","msg":"connection: ManageConnectionData: connection closed","id":"1(@)"} +{"time":"2026-04-25T17:58:28.25531338Z","level":"INFO","msg":"server: listener closed","addr":{"Name":"/tmp/wandb-59233-59299-20282455/socket","Net":"unix"}} +{"time":"2026-04-25T17:58:28.255336943Z","level":"INFO","msg":"server is closed"} diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/logs/debug-internal.log b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..155d70552a76a59347d162b23ebb3ca86bc24a0d --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/logs/debug-internal.log @@ -0,0 +1,13 @@ +{"time":"2026-04-25T17:57:54.341853376Z","level":"INFO","msg":"stream: starting","core version":"0.24.0"} +{"time":"2026-04-25T17:57:54.691230711Z","level":"INFO","msg":"stream: created new stream","id":"xsz105vg"} +{"time":"2026-04-25T17:57:54.691290773Z","level":"INFO","msg":"handler: started","stream_id":"xsz105vg"} +{"time":"2026-04-25T17:57:54.691873268Z","level":"INFO","msg":"sender: started","stream_id":"xsz105vg"} +{"time":"2026-04-25T17:57:54.691867194Z","level":"INFO","msg":"stream: started","id":"xsz105vg"} +{"time":"2026-04-25T17:57:54.691864727Z","level":"INFO","msg":"writer: started","stream_id":"xsz105vg"} +{"time":"2026-04-25T17:57:54.82455882Z","level":"ERROR","msg":"git repo not found","error":"repository does not exist"} +{"time":"2026-04-25T17:58:28.064988507Z","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"} +{"time":"2026-04-25T17:58:28.233438156Z","level":"INFO","msg":"handler: operation stats","stats":{}} +{"time":"2026-04-25T17:58:28.236157176Z","level":"INFO","msg":"stream: closing","id":"xsz105vg"} +{"time":"2026-04-25T17:58:28.236164699Z","level":"INFO","msg":"handler: closed","stream_id":"xsz105vg"} +{"time":"2026-04-25T17:58:28.236219966Z","level":"INFO","msg":"sender: closed","stream_id":"xsz105vg"} +{"time":"2026-04-25T17:58:28.236226022Z","level":"INFO","msg":"stream: closed","id":"xsz105vg"} diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/logs/debug.log b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..ad30c4538c0eff1a613442e3b80d9c4517d430d0 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/logs/debug.log @@ -0,0 +1,24 @@ +2026-04-25 17:57:54,047 INFO MainThread:59233 [wandb_setup.py:_flush():81] Current SDK version is 0.24.0 +2026-04-25 17:57:54,047 INFO MainThread:59233 [wandb_setup.py:_flush():81] Configure stats pid to 59233 +2026-04-25 17:57:54,047 INFO MainThread:59233 [wandb_setup.py:_flush():81] Loading settings from environment variables +2026-04-25 17:57:54,048 INFO MainThread:59233 [wandb_init.py:setup_run_log_directory():717] Logging user logs to /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/logs/debug.log +2026-04-25 17:57:54,048 INFO MainThread:59233 [wandb_init.py:setup_run_log_directory():718] Logging internal logs to /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_175754-xsz105vg/logs/debug-internal.log +2026-04-25 17:57:54,048 INFO MainThread:59233 [wandb_init.py:init():844] calling init 
triggers +2026-04-25 17:57:54,048 INFO MainThread:59233 [wandb_init.py:init():849] wandb.init called with sweep_config: {} +config: {'model': {'name': 'EleutherAI/pythia-1b', 'checkpoint_path': None, 'from_scratch': False}, 'training': {'epochs': 1, 'batch_size': 4, 'eval_batch_size': 12, 'gradient_accumulation_steps': 4, 'lr': 5e-05, 'weight_decay': 0.1, 'betas': [0.9, 0.95], 'eps': 1e-08, 'lr_scheduler': 'wsd', 'warmup_ratio': 0.1, 'decay_ratio': 0.2, 'warmup_steps': 100, 'min_lr_ratio': 0.1, 'max_grad_norm': 1.0, 'use_amp': True, 'resume': False, 'resume_checkpoint': None}, 'data': {'path': '/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full', 'max_context_len': 4096, 'max_target_len': 256, 'num_workers': 4, 'pin_memory': True, 'max_train_samples': 20000, 'max_val_samples': 2000}, 'logging': {'log_interval': 10, 'save_interval': 3000, 'eval_interval': 1000, 'save_every_epoch': True}, 'tracking': {'enabled': True, 'backend': 'wandb', 'project': 'code-completion_lr-sweep', 'run_name': 'pythia_1b_lr_5e-5', 'entity': None, 'base_url': 'https://wandb.platun0v.ru', 'local_dir': '/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5'}, 'paths': {'output_dir': '/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5'}, 'seed': 42, 'device': 'cuda', '_wandb': {'code_path': 'code/code_completion_exp/train_pythia/train.py'}} +2026-04-25 17:57:54,048 INFO MainThread:59233 [wandb_init.py:init():892] starting backend +2026-04-25 17:57:54,318 INFO MainThread:59233 [wandb_init.py:init():895] sending inform_init request +2026-04-25 17:57:54,340 INFO MainThread:59233 [wandb_init.py:init():903] backend started and connected +2026-04-25 17:57:54,343 INFO MainThread:59233 [wandb_init.py:init():973] updated telemetry +2026-04-25 17:57:54,360 INFO MainThread:59233 [wandb_init.py:init():997] communicating run to backend with 90.0 second timeout +2026-04-25 17:57:54,823 INFO MainThread:59233 [wandb_init.py:init():1044] starting run threads in backend +2026-04-25 17:57:54,982 INFO MainThread:59233 [wandb_run.py:_console_start():2529] atexit reg +2026-04-25 17:57:54,982 INFO MainThread:59233 [wandb_run.py:_redirect():2377] redirect: wrap_raw +2026-04-25 17:57:54,982 INFO MainThread:59233 [wandb_run.py:_redirect():2446] Wrapping output streams. +2026-04-25 17:57:54,982 INFO MainThread:59233 [wandb_run.py:_redirect():2469] Redirects installed. 
+2026-04-25 17:57:54,985 INFO MainThread:59233 [wandb_init.py:init():1084] run started, returning control to user process
+2026-04-25 17:58:27,672 INFO MainThread:59233 [wandb_run.py:_finish():2295] finishing run nikita/code-completion_lr-sweep/xsz105vg
+2026-04-25 17:58:27,673 INFO MainThread:59233 [wandb_run.py:_atexit_cleanup():2494] got exitcode: 0
+2026-04-25 17:58:27,673 INFO MainThread:59233 [wandb_run.py:_restore():2476] restore
+2026-04-25 17:58:27,673 INFO MainThread:59233 [wandb_run.py:_restore():2482] restore done
+2026-04-25 17:58:28,235 INFO MainThread:59233 [wandb_run.py:_footer_sync_info():3870] logging synced files
diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/code/code_completion_exp/train_pythia/train.py b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/code/code_completion_exp/train_pythia/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4739962b19b1d61085c8b55220470866db8aea1
--- /dev/null
+++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/code/code_completion_exp/train_pythia/train.py
@@ -0,0 +1,606 @@
+"""
+Training pipeline for Pythia (decoder-only transformer) on the Code Completion task.
+
+Configuration via Hydra + OmegaConf, logging to Trackio.
+DDP support via Accelerate for multi-GPU training.
+
+Usage:
+    # Basic run (single GPU)
+    python train.py
+
+    # Multi-GPU with Accelerate
+    accelerate launch train.py
+
+    # Multi-GPU with an explicit number of GPUs
+    accelerate launch --num_processes=4 train.py
+
+    # Override parameters from the CLI
+    python train.py training.lr=1e-4 training.epochs=5
+
+    # Pick a different model config
+    python train.py model=pythia_160m
+
+    # Multirun (sweep)
+    python train.py --multirun training.lr=1e-4,3e-4,1e-3
+
+    # Disable logging
+    python train.py tracking.enabled=false
+"""
+
+import os
+import math
+import time
+from pathlib import Path
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import DataLoader
+from datasets import load_from_disk
+
+import hydra
+from hydra.core.hydra_config import HydraConfig
+from omegaconf import DictConfig, OmegaConf
+from transformers import (
+    AutoTokenizer,
+    AutoModelForCausalLM,
+    AutoConfig,
+    PreTrainedTokenizerBase,
+)
+from accelerate import Accelerator
+from accelerate.utils import set_seed as accelerate_set_seed
+
+# Ensure repo root is on sys.path (needed when running from subdirectory)
+import sys
+sys.path.insert(0, str(Path(__file__).resolve().parents[2]))
+
+# Shared training library
+from training_lib.utils import AverageMeter, log_message
+from training_lib.checkpointing import save_checkpoint, load_checkpoint
+from training_lib.schedulers import get_lr_scheduler
+from training_lib.tracking import init_tracking, log_metrics, finish_tracking
+from training_lib.validation import run_validation
+
+
+# ============================================================================
+# DATA
+# ============================================================================
+
+
+class CodeCompletionCollator:
+    """Collate
function для батчирования примеров code completion.""" + + def __init__( + self, + tokenizer: PreTrainedTokenizerBase, + max_context_len: int = 1024, + max_target_len: int = 256, + ): + self.tokenizer = tokenizer + self.max_context_len = max_context_len + self.max_target_len = max_target_len + self.pad_token_id = tokenizer.pad_token_id + + def __call__(self, batch: list[dict]) -> dict: + contexts = [item["context"] for item in batch] + targets = [item["target"] for item in batch] + + encoded_contexts = self.tokenizer( + contexts, + add_special_tokens=True, + truncation=True, + max_length=self.max_context_len, + return_tensors=None, + ) + encoded_targets = self.tokenizer( + targets, + add_special_tokens=False, + truncation=True, + max_length=self.max_target_len, + return_tensors=None, + ) + + input_ids_list = [] + context_lengths = [] + + for ctx_ids, tgt_ids in zip( + encoded_contexts["input_ids"], encoded_targets["input_ids"] + ): + tgt_ids = tgt_ids + [self.tokenizer.eos_token_id] + context_lengths.append(len(ctx_ids)) + input_ids_list.append(ctx_ids + tgt_ids) + + max_len = max(len(ids) for ids in input_ids_list) + + padded_input_ids = [] + attention_mask = [] + + for ids in input_ids_list: + padding_len = max_len - len(ids) + padded_input_ids.append(ids + [self.pad_token_id] * padding_len) + attention_mask.append([1] * len(ids) + [0] * padding_len) + + return { + "input_ids": torch.tensor(padded_input_ids, dtype=torch.long), + "attention_mask": torch.tensor(attention_mask, dtype=torch.long), + "context_lengths": torch.tensor(context_lengths, dtype=torch.long), + } + + +def create_dataloaders( + cfg: DictConfig, tokenizer: PreTrainedTokenizerBase +) -> dict[str, DataLoader]: + """Создание DataLoader'ов для train и validation.""" + dataset_dict = load_from_disk(cfg.data.path) + + collator = CodeCompletionCollator( + tokenizer=tokenizer, + max_context_len=cfg.data.max_context_len, + max_target_len=cfg.data.max_target_len, + ) + + dataloaders = {} + + if "train" in dataset_dict: + train_dataset = dataset_dict["train"] + max_train = cfg.data.get("max_train_samples", None) + if max_train is not None: + train_dataset = train_dataset.select(range(min(max_train, len(train_dataset)))) + dataloaders["train"] = DataLoader( + train_dataset, + batch_size=cfg.training.batch_size, + shuffle=True, + collate_fn=collator, + num_workers=cfg.data.num_workers, + pin_memory=cfg.data.pin_memory, + ) + + if "validation" in dataset_dict: + val_dataset = dataset_dict["validation"] + max_val = cfg.data.get("max_val_samples", None) + if max_val is not None: + val_dataset = val_dataset.select(range(min(max_val, len(val_dataset)))) + eval_batch_size = cfg.training.get("eval_batch_size", cfg.training.batch_size) + dataloaders["validation"] = DataLoader( + val_dataset, + batch_size=eval_batch_size, + shuffle=False, + collate_fn=collator, + num_workers=cfg.data.num_workers, + pin_memory=cfg.data.pin_memory, + ) + + return dataloaders + + + + +# ============================================================================ +# LOSS ФУНКЦИИ +# ============================================================================ + + +def compute_loss( + logits: torch.Tensor, + input_ids: torch.Tensor, + context_lengths: torch.Tensor, + attention_mask: torch.Tensor, +) -> dict: + """Вычисление loss для авторегрессионной модели.""" + batch_size, seq_len, vocab_size = logits.shape + + shift_logits = logits[:, :-1, :].contiguous() + shift_labels = input_ids[:, 1:].contiguous() + shift_mask = attention_mask[:, 1:].contiguous() + + 
target_mask = torch.zeros_like(shift_labels, dtype=torch.bool) + for i in range(batch_size): + ctx_len = context_lengths[i].item() + target_mask[i, ctx_len - 1 :] = True + + final_mask = target_mask & shift_mask.bool() + + if final_mask.sum() > 0: + loss = F.cross_entropy( + shift_logits[final_mask], shift_labels[final_mask], reduction="mean" + ) + else: + loss = torch.tensor(0.0, device=logits.device) + + return {"loss": loss} + + +def _pythia_forward_loss( + model: nn.Module, + batch: dict, + cfg: DictConfig, + accelerator: Accelerator, +) -> dict: + """Forward + loss for a plain HF causal LM (attention_mask= kwarg, .logits).""" + input_ids = batch["input_ids"] + attention_mask = batch["attention_mask"] + context_lengths = batch["context_lengths"] + output = model(input_ids, attention_mask=attention_mask) + return compute_loss(output.logits, input_ids, context_lengths, attention_mask) + + +# ============================================================================ +# PARAMETER GROUPING +# ============================================================================ + + +def group_params(model: nn.Module, weight_decay: float) -> list[dict]: + """Группировка параметров для optimizer.""" + decay_params = [] + no_decay_params = [] + + for name, param in model.named_parameters(): + if not param.requires_grad: + continue + + if "bias" in name or "LayerNorm" in name or "layernorm" in name: + no_decay_params.append(param) + else: + decay_params.append(param) + + return [ + {"params": decay_params, "weight_decay": weight_decay}, + {"params": no_decay_params, "weight_decay": 0.0}, + ] + + + + +# ============================================================================ +# TRAINING LOOP +# ============================================================================ + + +def train_epoch( + model: nn.Module, + dataloader: DataLoader, + optimizer: torch.optim.Optimizer, + scheduler, + cfg: DictConfig, + epoch: int, + global_step: int, + accelerator: Accelerator, + val_dataloader: DataLoader | None = None, + best_val_loss: float = float("inf"), +) -> tuple[int, float]: + """Один epoch тренировки. 
Возвращает (global_step, best_val_loss).""" + model.train() + + loss_meter = AverageMeter() + + optimizer.zero_grad() + accumulated_loss = 0.0 + accumulated_steps = 0 + + epoch_start_time = time.time() + step_start_time = time.time() + + for batch_idx, batch in enumerate(dataloader): + input_ids = batch["input_ids"] + attention_mask = batch["attention_mask"] + context_lengths = batch["context_lengths"] + + with accelerator.autocast(): + output = model(input_ids, attention_mask=attention_mask) + logits = output.logits + loss_dict = compute_loss( + logits, input_ids, context_lengths, attention_mask + ) + + loss = loss_dict["loss"] / cfg.training.gradient_accumulation_steps + accelerator.backward(loss) + + accumulated_loss += loss_dict["loss"].item() + accumulated_steps += 1 + + if accumulated_steps == cfg.training.gradient_accumulation_steps: + if cfg.training.max_grad_norm > 0: + accelerator.clip_grad_norm_( + model.parameters(), cfg.training.max_grad_norm + ) + + optimizer.step() + scheduler.step() + optimizer.zero_grad() + + avg_loss = accumulated_loss / cfg.training.gradient_accumulation_steps + loss_meter.update(avg_loss) + + global_step += 1 + + if global_step % cfg.logging.log_interval == 0: + step_time = time.time() - step_start_time + current_lr = scheduler.get_last_lr()[0] + + metrics = { + "train/loss": loss_meter.val, + "train/loss_avg": loss_meter.avg, + "train/lr": current_lr, + "train/epoch": epoch, + "train/step_time": step_time / cfg.logging.log_interval, + } + + log_metrics(metrics, step=global_step) + + log_message( + f"Epoch {epoch} | Step {global_step} | " + f"Loss: {loss_meter.avg:.4f} | " + f"LR: {current_lr:.2e}", + cfg, + accelerator, + ) + + step_start_time = time.time() + + if ( + cfg.logging.save_interval > 0 + and global_step % cfg.logging.save_interval == 0 + ): + save_checkpoint( + model, optimizer, scheduler, global_step, epoch, cfg, accelerator + ) + + eval_interval = cfg.logging.get("eval_interval", 0) + if ( + eval_interval > 0 + and val_dataloader is not None + and global_step % eval_interval == 0 + ): + val_metrics = run_validation( + model=model, + dataloader=val_dataloader, + cfg=cfg, + global_step=global_step, + accelerator=accelerator, + forward_loss_fn=_pythia_forward_loss, + ) + + if val_metrics["val/loss"] < best_val_loss: + best_val_loss = val_metrics["val/loss"] + if accelerator.is_main_process: + best_model_path = Path(cfg.paths.output_dir) / "model_best.pt" + unwrapped_model = accelerator.unwrap_model(model) + torch.save(unwrapped_model.state_dict(), best_model_path) + log_message( + f"New best model saved! 
Val loss: {best_val_loss:.4f}", + cfg, + accelerator + ) + + log_metrics( + { + "best/val_loss": best_val_loss, + "best/val_perplexity": val_metrics["val/perplexity"], + "best/step": global_step, + }, + step=global_step, + ) + + model.train() + + accumulated_loss = 0.0 + accumulated_steps = 0 + + epoch_time = time.time() - epoch_start_time + + log_message( + f"Epoch {epoch} completed in {epoch_time:.2f}s | " + f"Loss: {loss_meter.avg:.4f}", + cfg, + accelerator, + ) + + log_metrics({ + "epoch/loss": loss_meter.avg, + "epoch/time": epoch_time, + }) + + return global_step, best_val_loss + + +# ============================================================================ +# MAIN +# ============================================================================ + + +@hydra.main(version_base=None, config_path="configs", config_name="config") +def main(cfg: DictConfig): + """Главная функция тренировки с поддержкой DDP через Accelerate.""" + + # === Performance: Enable TF32 for faster matmuls on Ampere+ GPUs === + torch.set_float32_matmul_precision('high') + + # === Accelerator Setup === + mixed_precision = "bf16" if cfg.training.use_amp else "no" + + accelerator = Accelerator( + mixed_precision=mixed_precision, + gradient_accumulation_steps=cfg.training.gradient_accumulation_steps, + ) + + # === Setup === + accelerate_set_seed(cfg.seed) + + if cfg.paths.output_dir is None: + cfg.paths.output_dir = HydraConfig.get().runtime.output_dir + + OmegaConf.resolve(cfg) + + log_message(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES', 'not set')}", cfg, accelerator) + log_message(f"Number of processes: {accelerator.num_processes}", cfg, accelerator) + log_message(f"Process index: {accelerator.process_index}", cfg, accelerator) + log_message(f"Mixed precision: {mixed_precision}", cfg, accelerator) + + log_message("=" * 60, cfg, accelerator) + log_message("Pythia Training Pipeline (Hydra + Trackio + Accelerate)", cfg, accelerator) + log_message("=" * 60, cfg, accelerator) + log_message(f"Config:\n{OmegaConf.to_yaml(cfg)}", cfg, accelerator) + + # === Trackio Init === + init_tracking(cfg, accelerator) + + # === Tokenizer === + log_message("Initializing tokenizer...", cfg, accelerator) + tokenizer = AutoTokenizer.from_pretrained(cfg.model.name) + + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + tokenizer.pad_token_id = tokenizer.eos_token_id + + # === Model === + log_message("Loading model...", cfg, accelerator) + + # Flash Attention 2 + torch_dtype = torch.bfloat16 if cfg.training.use_amp else torch.float32 + + if cfg.model.checkpoint_path: + model = AutoModelForCausalLM.from_pretrained( + cfg.model.name, + attn_implementation="flash_attention_2", + torch_dtype=torch_dtype, + ) + checkpoint = torch.load(cfg.model.checkpoint_path, map_location="cpu") + model.load_state_dict(checkpoint["model_state_dict"] if "model_state_dict" in checkpoint else checkpoint) + log_message(f"Loaded checkpoint: {cfg.model.checkpoint_path}", cfg, accelerator) + elif cfg.model.from_scratch: + config = AutoConfig.from_pretrained(cfg.model.name) + config._attn_implementation = "flash_attention_2" + model = AutoModelForCausalLM.from_config(config, torch_dtype=torch_dtype) + log_message(f"Initialized from scratch: {cfg.model.name}", cfg, accelerator) + else: + model = AutoModelForCausalLM.from_pretrained( + cfg.model.name, + attn_implementation="flash_attention_2", + torch_dtype=torch_dtype, + ) + log_message(f"Loaded pretrained: {cfg.model.name}", cfg, accelerator) + + model.train() + + # Log 
model info + total_params = sum(p.numel() for p in model.parameters()) + trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) + log_message(f"Total params: {total_params:,}", cfg, accelerator) + log_message(f"Trainable params: {trainable_params:,}", cfg, accelerator) + + # === Data === + log_message("Creating dataloaders...", cfg, accelerator) + dataloaders = create_dataloaders(cfg, tokenizer) + + train_dataloader = dataloaders["train"] + val_dataloader = dataloaders.get("validation", None) + + log_message(f"Train dataset size: {len(train_dataloader.dataset)}", cfg, accelerator) + log_message(f"Train batches per epoch (before DDP split): {len(train_dataloader)}", cfg, accelerator) + + if val_dataloader: + log_message(f"Validation dataset size: {len(val_dataloader.dataset)}", cfg, accelerator) + log_message(f"Validation batches: {len(val_dataloader)}", cfg, accelerator) + else: + log_message("No validation dataset found", cfg, accelerator) + + # === Optimizer === + log_message("Creating optimizer...", cfg, accelerator) + param_groups = group_params(model, cfg.training.weight_decay) + + optimizer = torch.optim.AdamW( + param_groups, + lr=cfg.training.lr, + betas=tuple(cfg.training.betas), + eps=cfg.training.eps, + ) + + # === Scheduler === + steps_per_epoch = math.ceil( + len(train_dataloader) / accelerator.num_processes + ) + total_steps = ( + cfg.training.epochs + * steps_per_epoch + // cfg.training.gradient_accumulation_steps + ) + scheduler = get_lr_scheduler(optimizer, cfg, total_steps) + + log_message( + f"Total steps: {total_steps}, Steps per epoch: {steps_per_epoch}", + cfg, + accelerator + ) + + # === Accelerate Prepare === + log_message("Preparing model, optimizer, and dataloaders with Accelerate...", cfg, accelerator) + + if val_dataloader is not None: + model, optimizer, train_dataloader, val_dataloader, scheduler = accelerator.prepare( + model, optimizer, train_dataloader, val_dataloader, scheduler + ) + else: + model, optimizer, train_dataloader, scheduler = accelerator.prepare( + model, optimizer, train_dataloader, scheduler + ) + + log_message(f"Train batches per epoch (after DDP split): {len(train_dataloader)}", cfg, accelerator) + + # === Resume === + global_step = 0 + start_epoch = 1 + + if cfg.training.resume and cfg.training.resume_checkpoint: + global_step, start_epoch = load_checkpoint( + model, optimizer, scheduler, cfg.training.resume_checkpoint, cfg, accelerator + ) + start_epoch += 1 + + # === Training Loop === + log_message("Starting training...", cfg, accelerator) + + best_val_loss = float("inf") + + try: + for epoch in range(start_epoch, cfg.training.epochs + 1): + log_message(f"\n{'=' * 60}", cfg, accelerator) + log_message(f"EPOCH {epoch}/{cfg.training.epochs}", cfg, accelerator) + log_message(f"{'=' * 60}", cfg, accelerator) + + global_step, best_val_loss = train_epoch( + model=model, + dataloader=train_dataloader, + optimizer=optimizer, + scheduler=scheduler, + cfg=cfg, + epoch=epoch, + global_step=global_step, + accelerator=accelerator, + val_dataloader=val_dataloader, + best_val_loss=best_val_loss, + ) + + if cfg.logging.save_every_epoch: + save_checkpoint( + model, optimizer, scheduler, global_step, epoch, cfg, accelerator + ) + + except KeyboardInterrupt: + log_message("Training interrupted by user", cfg, accelerator) + save_checkpoint(model, optimizer, scheduler, global_step, epoch, cfg, accelerator) + + # === Final Save === + log_message("\nTraining completed!", cfg, accelerator) + + if accelerator.is_main_process: + 
final_model_path = Path(cfg.paths.output_dir) / "model_final.pt" + unwrapped_model = accelerator.unwrap_model(model) + torch.save(unwrapped_model.state_dict(), final_model_path) + log_message(f"Final model: {final_model_path}", cfg, accelerator) + + accelerator.wait_for_everyone() + finish_tracking() + + +if __name__ == "__main__": + main() diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/config.yaml b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f40ae25fab45c1538085e41f6f46c232405e0122 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/config.yaml @@ -0,0 +1,146 @@ +_wandb: + value: + cli_version: 0.24.0 + code_path: code/code_completion_exp/train_pythia/train.py + e: + ustumeirj564la8awm2vaziyvcmzba88: + args: + - tracking=wandb + - tracking.project=code-completion_lr-sweep + - tracking.run_name=pythia_1b_lr_5e-5 + - training.lr=5e-5 + - paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5 + - model=pythia_1b + - data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full + codePath: code_completion_exp/train_pythia/train.py + codePathLocal: train.py + cpu_count: 64 + cpu_count_logical: 128 + cudaVersion: "12.2" + disk: + /: + total: "265214230528" + used: "98730414080" + email: nikita@local.ru + executable: /venv/bytellm/bin/python + git: + commit: f111e13281aa0dc58e24302edab5b0d5c2024586 + remote: https://github.com/naryst/byte-llms-code.git + gpu: NVIDIA H100 80GB HBM3 + gpu_count: 4 + gpu_nvidia: + - architecture: Hopper + cudaCores: 16896 + memoryTotal: "85520809984" + name: NVIDIA H100 80GB HBM3 + uuid: GPU-b60cdcab-2033-2009-41de-be646c953a20 + - architecture: Hopper + cudaCores: 16896 + memoryTotal: "85520809984" + name: NVIDIA H100 80GB HBM3 + uuid: GPU-9982b420-4520-4238-c378-ec5a46015474 + - architecture: Hopper + cudaCores: 16896 + memoryTotal: "85520809984" + name: NVIDIA H100 80GB HBM3 + uuid: GPU-e26ebaac-aaa6-3eed-17ab-a3dce303a76f + - architecture: Hopper + cudaCores: 16896 + memoryTotal: "85520809984" + name: NVIDIA H100 80GB HBM3 + uuid: GPU-9dfc6dba-0be6-4a10-1027-336cc0e65134 + host: 7504e518d24a + memory: + total: "1081679683584" + os: Linux-5.4.0-176-generic-x86_64-with-glibc2.35 + program: /workspace/byte-llms-code/code_completion_exp/train_pythia/train.py + python: CPython 3.12.0 + root: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5 + startedAt: "2026-04-25T19:30:45.739561Z" + writerId: ustumeirj564la8awm2vaziyvcmzba88 + m: [] + python_version: 3.12.0 + t: + "1": + - 1 + - 11 + - 49 + - 50 + - 51 + - 71 + - 105 + "2": + - 1 + - 11 + - 49 + - 50 + - 51 + - 71 + - 105 + "3": + - 2 + - 13 + - 16 + - 61 + "4": 3.12.0 + "5": 0.24.0 + "6": 4.57.6 + "12": 0.24.0 + "13": linux-x86_64 +data: + value: + max_context_len: 4096 + max_target_len: 256 + max_train_samples: null + max_val_samples: 2000 + num_workers: 4 + path: /workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full + pin_memory: true +device: + value: cuda +logging: + value: + eval_interval: 2000 + log_interval: 10 + save_every_epoch: false + save_interval: 0 +model: + value: + checkpoint_path: null + from_scratch: false + name: EleutherAI/pythia-1b +paths: + value: + output_dir: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5 +seed: + value: 42 +tracking: + value: + backend: wandb + base_url: https://wandb.platun0v.ru + enabled: true + entity: null + local_dir: 
/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5 + project: code-completion_lr-sweep + run_name: pythia_1b_lr_5e-5 +training: + value: + batch_size: 4 + betas: + - 0.9 + - 0.95 + decay_ratio: 0.2 + epochs: 1 + eps: 1e-08 + eval_batch_size: 12 + gradient_accumulation_steps: 4 + lr: 5e-05 + lr_scheduler: wsd + max_grad_norm: 1 + min_lr_ratio: 0.1 + resume: false + resume_checkpoint: null + use_amp: true + warmup_ratio: 0.1 + warmup_steps: 100 + weight_decay: 0.1 diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/output.log b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..994e76aaee77969cef87d050bbebe34d8da94922 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/output.log @@ -0,0 +1,1056 @@ +[2026-04-25 19:30:46] Initializing tokenizer... +[2026-04-25 19:30:47] Loading model... +`torch_dtype` is deprecated! Use `dtype` instead! +[2026-04-25 19:30:50] Loaded pretrained: EleutherAI/pythia-1b +[2026-04-25 19:30:50] Total params: 1,011,781,632 +[2026-04-25 19:30:50] Trainable params: 1,011,781,632 +[2026-04-25 19:30:50] Creating dataloaders... +[2026-04-25 19:30:50] Train dataset size: 316397 +[2026-04-25 19:30:50] Train batches per epoch (before DDP split): 79100 +[2026-04-25 19:30:50] Validation dataset size: 2000 +[2026-04-25 19:30:50] Validation batches: 167 +[2026-04-25 19:30:50] Creating optimizer... +[2026-04-25 19:30:50] Total steps: 9887, Steps per epoch: 39550 +[2026-04-25 19:30:50] Preparing model, optimizer, and dataloaders with Accelerate... +[2026-04-25 19:30:51] Train batches per epoch (after DDP split): 39550 +[2026-04-25 19:30:51] Starting training... +[2026-04-25 19:30:51] +============================================================ +[2026-04-25 19:30:51] EPOCH 1/1 +[2026-04-25 19:30:51] ============================================================ +[2026-04-25 19:30:55] Epoch 1 | Step 10 | Loss: 2.1524 | LR: 5.91e-06 +[2026-04-25 19:30:57] Epoch 1 | Step 20 | Loss: 1.8675 | LR: 6.82e-06 +[2026-04-25 19:31:00] Epoch 1 | Step 30 | Loss: 1.6663 | LR: 7.73e-06 +[2026-04-25 19:31:03] Epoch 1 | Step 40 | Loss: 1.5603 | LR: 8.64e-06 +[2026-04-25 19:31:05] Epoch 1 | Step 50 | Loss: 1.4615 | LR: 9.55e-06 +[2026-04-25 19:31:08] Epoch 1 | Step 60 | Loss: 1.3987 | LR: 1.05e-05 +[2026-04-25 19:31:11] Epoch 1 | Step 70 | Loss: 1.3411 | LR: 1.14e-05 +[2026-04-25 19:31:13] Epoch 1 | Step 80 | Loss: 1.3206 | LR: 1.23e-05 +[2026-04-25 19:31:16] Epoch 1 | Step 90 | Loss: 1.2938 | LR: 1.32e-05 +[2026-04-25 19:31:19] Epoch 1 | Step 100 | Loss: 1.2705 | LR: 1.41e-05 +[2026-04-25 19:31:21] Epoch 1 | Step 110 | Loss: 1.2636 | LR: 1.50e-05 +[2026-04-25 19:31:24] Epoch 1 | Step 120 | Loss: 1.2522 | LR: 1.59e-05 +[2026-04-25 19:31:26] Epoch 1 | Step 130 | Loss: 1.2495 | LR: 1.68e-05 +[2026-04-25 19:31:29] Epoch 1 | Step 140 | Loss: 1.2478 | LR: 1.78e-05 +[2026-04-25 19:31:31] Epoch 1 | Step 150 | Loss: 1.2312 | LR: 1.87e-05 +[2026-04-25 19:31:34] Epoch 1 | Step 160 | Loss: 1.2177 | LR: 1.96e-05 +[2026-04-25 19:31:36] Epoch 1 | Step 170 | Loss: 1.2106 | LR: 2.05e-05 +[2026-04-25 19:31:39] Epoch 1 | Step 180 | Loss: 1.1969 | LR: 2.14e-05 +[2026-04-25 19:31:41] Epoch 1 | Step 190 | Loss: 1.1936 | LR: 2.23e-05 +[2026-04-25 19:31:44] Epoch 1 | Step 200 | Loss: 1.1900 | LR: 2.32e-05 +[2026-04-25 19:31:46] Epoch 1 | Step 210 | Loss: 1.1927 | LR: 2.41e-05 +[2026-04-25 19:31:49] Epoch 1 | Step 220 | Loss: 1.1902 | LR: 2.50e-05 
+[2026-04-25 19:31:51] Epoch 1 | Step 230 | Loss: 1.1810 | LR: 2.60e-05 +[2026-04-25 19:31:54] Epoch 1 | Step 240 | Loss: 1.1758 | LR: 2.69e-05 +[2026-04-25 19:31:56] Epoch 1 | Step 250 | Loss: 1.1720 | LR: 2.78e-05 +[2026-04-25 19:31:59] Epoch 1 | Step 260 | Loss: 1.1742 | LR: 2.87e-05 +[2026-04-25 19:32:02] Epoch 1 | Step 270 | Loss: 1.1699 | LR: 2.96e-05 +[2026-04-25 19:32:04] Epoch 1 | Step 280 | Loss: 1.1642 | LR: 3.05e-05 +[2026-04-25 19:32:07] Epoch 1 | Step 290 | Loss: 1.1616 | LR: 3.14e-05 +[2026-04-25 19:32:09] Epoch 1 | Step 300 | Loss: 1.1595 | LR: 3.23e-05 +[2026-04-25 19:32:12] Epoch 1 | Step 310 | Loss: 1.1566 | LR: 3.32e-05 +[2026-04-25 19:32:14] Epoch 1 | Step 320 | Loss: 1.1522 | LR: 3.41e-05 +[2026-04-25 19:32:17] Epoch 1 | Step 330 | Loss: 1.1486 | LR: 3.51e-05 +[2026-04-25 19:32:20] Epoch 1 | Step 340 | Loss: 1.1466 | LR: 3.60e-05 +[2026-04-25 19:32:22] Epoch 1 | Step 350 | Loss: 1.1467 | LR: 3.69e-05 +[2026-04-25 19:32:25] Epoch 1 | Step 360 | Loss: 1.1434 | LR: 3.78e-05 +[2026-04-25 19:32:28] Epoch 1 | Step 370 | Loss: 1.1389 | LR: 3.87e-05 +[2026-04-25 19:32:31] Epoch 1 | Step 380 | Loss: 1.1363 | LR: 3.96e-05 +[2026-04-25 19:32:33] Epoch 1 | Step 390 | Loss: 1.1352 | LR: 4.05e-05 +[2026-04-25 19:32:36] Epoch 1 | Step 400 | Loss: 1.1346 | LR: 4.14e-05 +[2026-04-25 19:32:38] Epoch 1 | Step 410 | Loss: 1.1341 | LR: 4.23e-05 +[2026-04-25 19:32:41] Epoch 1 | Step 420 | Loss: 1.1331 | LR: 4.33e-05 +[2026-04-25 19:32:44] Epoch 1 | Step 430 | Loss: 1.1364 | LR: 4.42e-05 +[2026-04-25 19:32:46] Epoch 1 | Step 440 | Loss: 1.1334 | LR: 4.51e-05 +[2026-04-25 19:32:49] Epoch 1 | Step 450 | Loss: 1.1329 | LR: 4.60e-05 +[2026-04-25 19:32:52] Epoch 1 | Step 460 | Loss: 1.1340 | LR: 4.69e-05 +[2026-04-25 19:32:54] Epoch 1 | Step 470 | Loss: 1.1333 | LR: 4.78e-05 +[2026-04-25 19:32:57] Epoch 1 | Step 480 | Loss: 1.1345 | LR: 4.87e-05 +[2026-04-25 19:33:00] Epoch 1 | Step 490 | Loss: 1.1336 | LR: 4.96e-05 +[2026-04-25 19:33:02] Epoch 1 | Step 500 | Loss: 1.1338 | LR: 5.00e-05 +[2026-04-25 19:33:04] Epoch 1 | Step 510 | Loss: 1.1334 | LR: 5.00e-05 +[2026-04-25 19:33:07] Epoch 1 | Step 520 | Loss: 1.1345 | LR: 5.00e-05 +[2026-04-25 19:33:10] Epoch 1 | Step 530 | Loss: 1.1333 | LR: 5.00e-05 +[2026-04-25 19:33:12] Epoch 1 | Step 540 | Loss: 1.1320 | LR: 5.00e-05 +[2026-04-25 19:33:15] Epoch 1 | Step 550 | Loss: 1.1320 | LR: 5.00e-05 +[2026-04-25 19:33:17] Epoch 1 | Step 560 | Loss: 1.1316 | LR: 5.00e-05 +[2026-04-25 19:33:20] Epoch 1 | Step 570 | Loss: 1.1330 | LR: 5.00e-05 +[2026-04-25 19:33:22] Epoch 1 | Step 580 | Loss: 1.1348 | LR: 5.00e-05 +[2026-04-25 19:33:25] Epoch 1 | Step 590 | Loss: 1.1367 | LR: 5.00e-05 +[2026-04-25 19:33:28] Epoch 1 | Step 600 | Loss: 1.1377 | LR: 5.00e-05 +[2026-04-25 19:33:30] Epoch 1 | Step 610 | Loss: 1.1404 | LR: 5.00e-05 +[2026-04-25 19:33:32] Epoch 1 | Step 620 | Loss: 1.1430 | LR: 5.00e-05 +[2026-04-25 19:33:35] Epoch 1 | Step 630 | Loss: 1.1441 | LR: 5.00e-05 +[2026-04-25 19:33:37] Epoch 1 | Step 640 | Loss: 1.1462 | LR: 5.00e-05 +[2026-04-25 19:33:40] Epoch 1 | Step 650 | Loss: 1.1474 | LR: 5.00e-05 +[2026-04-25 19:33:42] Epoch 1 | Step 660 | Loss: 1.1492 | LR: 5.00e-05 +[2026-04-25 19:33:45] Epoch 1 | Step 670 | Loss: 1.1490 | LR: 5.00e-05 +[2026-04-25 19:33:48] Epoch 1 | Step 680 | Loss: 1.1500 | LR: 5.00e-05 +[2026-04-25 19:33:50] Epoch 1 | Step 690 | Loss: 1.1503 | LR: 5.00e-05 +[2026-04-25 19:33:53] Epoch 1 | Step 700 | Loss: 1.1522 | LR: 5.00e-05 +[2026-04-25 19:33:55] Epoch 1 | Step 710 | Loss: 1.1525 | LR: 5.00e-05 +[2026-04-25 19:33:58] 
Epoch 1 | Step 720 | Loss: 1.1535 | LR: 5.00e-05 +[2026-04-25 19:34:00] Epoch 1 | Step 730 | Loss: 1.1543 | LR: 5.00e-05 +[2026-04-25 19:34:03] Epoch 1 | Step 740 | Loss: 1.1542 | LR: 5.00e-05 +[2026-04-25 19:34:05] Epoch 1 | Step 750 | Loss: 1.1544 | LR: 5.00e-05 +[2026-04-25 19:34:08] Epoch 1 | Step 760 | Loss: 1.1564 | LR: 5.00e-05 +[2026-04-25 19:34:10] Epoch 1 | Step 770 | Loss: 1.1587 | LR: 5.00e-05 +[2026-04-25 19:34:13] Epoch 1 | Step 780 | Loss: 1.1598 | LR: 5.00e-05 +[2026-04-25 19:34:15] Epoch 1 | Step 790 | Loss: 1.1607 | LR: 5.00e-05 +[2026-04-25 19:34:18] Epoch 1 | Step 800 | Loss: 1.1601 | LR: 5.00e-05 +[2026-04-25 19:34:20] Epoch 1 | Step 810 | Loss: 1.1607 | LR: 5.00e-05 +[2026-04-25 19:34:23] Epoch 1 | Step 820 | Loss: 1.1608 | LR: 5.00e-05 +[2026-04-25 19:34:25] Epoch 1 | Step 830 | Loss: 1.1620 | LR: 5.00e-05 +[2026-04-25 19:34:28] Epoch 1 | Step 840 | Loss: 1.1619 | LR: 5.00e-05 +[2026-04-25 19:34:31] Epoch 1 | Step 850 | Loss: 1.1610 | LR: 5.00e-05 +[2026-04-25 19:34:33] Epoch 1 | Step 860 | Loss: 1.1625 | LR: 5.00e-05 +[2026-04-25 19:34:36] Epoch 1 | Step 870 | Loss: 1.1644 | LR: 5.00e-05 +[2026-04-25 19:34:38] Epoch 1 | Step 880 | Loss: 1.1657 | LR: 5.00e-05 +[2026-04-25 19:34:40] Epoch 1 | Step 890 | Loss: 1.1664 | LR: 5.00e-05 +[2026-04-25 19:34:43] Epoch 1 | Step 900 | Loss: 1.1663 | LR: 5.00e-05 +[2026-04-25 19:34:45] Epoch 1 | Step 910 | Loss: 1.1678 | LR: 5.00e-05 +[2026-04-25 19:34:48] Epoch 1 | Step 920 | Loss: 1.1698 | LR: 5.00e-05 +[2026-04-25 19:34:51] Epoch 1 | Step 930 | Loss: 1.1699 | LR: 5.00e-05 +[2026-04-25 19:34:53] Epoch 1 | Step 940 | Loss: 1.1709 | LR: 5.00e-05 +[2026-04-25 19:34:56] Epoch 1 | Step 950 | Loss: 1.1697 | LR: 5.00e-05 +[2026-04-25 19:34:59] Epoch 1 | Step 960 | Loss: 1.1699 | LR: 5.00e-05 +[2026-04-25 19:35:02] Epoch 1 | Step 970 | Loss: 1.1707 | LR: 5.00e-05 +[2026-04-25 19:35:04] Epoch 1 | Step 980 | Loss: 1.1705 | LR: 5.00e-05 +[2026-04-25 19:35:07] Epoch 1 | Step 990 | Loss: 1.1698 | LR: 5.00e-05 +[2026-04-25 19:35:10] Epoch 1 | Step 1000 | Loss: 1.1701 | LR: 5.00e-05 +[2026-04-25 19:35:12] Epoch 1 | Step 1010 | Loss: 1.1715 | LR: 5.00e-05 +[2026-04-25 19:35:14] Epoch 1 | Step 1020 | Loss: 1.1718 | LR: 5.00e-05 +[2026-04-25 19:35:17] Epoch 1 | Step 1030 | Loss: 1.1732 | LR: 5.00e-05 +[2026-04-25 19:35:19] Epoch 1 | Step 1040 | Loss: 1.1720 | LR: 5.00e-05 +[2026-04-25 19:35:22] Epoch 1 | Step 1050 | Loss: 1.1719 | LR: 5.00e-05 +[2026-04-25 19:35:24] Epoch 1 | Step 1060 | Loss: 1.1707 | LR: 5.00e-05 +[2026-04-25 19:35:27] Epoch 1 | Step 1070 | Loss: 1.1707 | LR: 5.00e-05 +[2026-04-25 19:35:29] Epoch 1 | Step 1080 | Loss: 1.1726 | LR: 5.00e-05 +[2026-04-25 19:35:32] Epoch 1 | Step 1090 | Loss: 1.1749 | LR: 5.00e-05 +[2026-04-25 19:35:34] Epoch 1 | Step 1100 | Loss: 1.1750 | LR: 5.00e-05 +[2026-04-25 19:35:37] Epoch 1 | Step 1110 | Loss: 1.1760 | LR: 5.00e-05 +[2026-04-25 19:35:39] Epoch 1 | Step 1120 | Loss: 1.1770 | LR: 5.00e-05 +[2026-04-25 19:35:42] Epoch 1 | Step 1130 | Loss: 1.1777 | LR: 5.00e-05 +[2026-04-25 19:35:45] Epoch 1 | Step 1140 | Loss: 1.1778 | LR: 5.00e-05 +[2026-04-25 19:35:47] Epoch 1 | Step 1150 | Loss: 1.1764 | LR: 5.00e-05 +[2026-04-25 19:35:50] Epoch 1 | Step 1160 | Loss: 1.1777 | LR: 5.00e-05 +[2026-04-25 19:35:52] Epoch 1 | Step 1170 | Loss: 1.1790 | LR: 5.00e-05 +[2026-04-25 19:35:55] Epoch 1 | Step 1180 | Loss: 1.1791 | LR: 5.00e-05 +[2026-04-25 19:35:58] Epoch 1 | Step 1190 | Loss: 1.1799 | LR: 5.00e-05 +[2026-04-25 19:36:00] Epoch 1 | Step 1200 | Loss: 1.1797 | LR: 5.00e-05 +[2026-04-25 19:36:03] Epoch 
1 | Step 1210 | Loss: 1.1787 | LR: 5.00e-05 +[2026-04-25 19:36:05] Epoch 1 | Step 1220 | Loss: 1.1775 | LR: 5.00e-05 +[2026-04-25 19:36:08] Epoch 1 | Step 1230 | Loss: 1.1783 | LR: 5.00e-05 +[2026-04-25 19:36:11] Epoch 1 | Step 1240 | Loss: 1.1790 | LR: 5.00e-05 +[2026-04-25 19:36:13] Epoch 1 | Step 1250 | Loss: 1.1786 | LR: 5.00e-05 +[2026-04-25 19:36:16] Epoch 1 | Step 1260 | Loss: 1.1788 | LR: 5.00e-05 +[2026-04-25 19:36:18] Epoch 1 | Step 1270 | Loss: 1.1777 | LR: 5.00e-05 +[2026-04-25 19:36:20] Epoch 1 | Step 1280 | Loss: 1.1787 | LR: 5.00e-05 +[2026-04-25 19:36:23] Epoch 1 | Step 1290 | Loss: 1.1795 | LR: 5.00e-05 +[2026-04-25 19:36:26] Epoch 1 | Step 1300 | Loss: 1.1790 | LR: 5.00e-05 +[2026-04-25 19:36:28] Epoch 1 | Step 1310 | Loss: 1.1792 | LR: 5.00e-05 +[2026-04-25 19:36:31] Epoch 1 | Step 1320 | Loss: 1.1798 | LR: 5.00e-05 +[2026-04-25 19:36:33] Epoch 1 | Step 1330 | Loss: 1.1793 | LR: 5.00e-05 +[2026-04-25 19:36:35] Epoch 1 | Step 1340 | Loss: 1.1796 | LR: 5.00e-05 +[2026-04-25 19:36:38] Epoch 1 | Step 1350 | Loss: 1.1804 | LR: 5.00e-05 +[2026-04-25 19:36:41] Epoch 1 | Step 1360 | Loss: 1.1804 | LR: 5.00e-05 +[2026-04-25 19:36:43] Epoch 1 | Step 1370 | Loss: 1.1805 | LR: 5.00e-05 +[2026-04-25 19:36:46] Epoch 1 | Step 1380 | Loss: 1.1817 | LR: 5.00e-05 +[2026-04-25 19:36:48] Epoch 1 | Step 1390 | Loss: 1.1826 | LR: 5.00e-05 +[2026-04-25 19:36:51] Epoch 1 | Step 1400 | Loss: 1.1828 | LR: 5.00e-05 +[2026-04-25 19:36:54] Epoch 1 | Step 1410 | Loss: 1.1819 | LR: 5.00e-05 +[2026-04-25 19:36:56] Epoch 1 | Step 1420 | Loss: 1.1821 | LR: 5.00e-05 +[2026-04-25 19:36:59] Epoch 1 | Step 1430 | Loss: 1.1823 | LR: 5.00e-05 +[2026-04-25 19:37:01] Epoch 1 | Step 1440 | Loss: 1.1822 | LR: 5.00e-05 +[2026-04-25 19:37:04] Epoch 1 | Step 1450 | Loss: 1.1824 | LR: 5.00e-05 +[2026-04-25 19:37:06] Epoch 1 | Step 1460 | Loss: 1.1814 | LR: 5.00e-05 +[2026-04-25 19:37:09] Epoch 1 | Step 1470 | Loss: 1.1823 | LR: 5.00e-05 +[2026-04-25 19:37:12] Epoch 1 | Step 1480 | Loss: 1.1825 | LR: 5.00e-05 +[2026-04-25 19:37:14] Epoch 1 | Step 1490 | Loss: 1.1834 | LR: 5.00e-05 +[2026-04-25 19:37:16] Epoch 1 | Step 1500 | Loss: 1.1832 | LR: 5.00e-05 +[2026-04-25 19:37:19] Epoch 1 | Step 1510 | Loss: 1.1836 | LR: 5.00e-05 +[2026-04-25 19:37:22] Epoch 1 | Step 1520 | Loss: 1.1842 | LR: 5.00e-05 +[2026-04-25 19:37:24] Epoch 1 | Step 1530 | Loss: 1.1842 | LR: 5.00e-05 +[2026-04-25 19:37:26] Epoch 1 | Step 1540 | Loss: 1.1850 | LR: 5.00e-05 +[2026-04-25 19:37:29] Epoch 1 | Step 1550 | Loss: 1.1855 | LR: 5.00e-05 +[2026-04-25 19:37:31] Epoch 1 | Step 1560 | Loss: 1.1850 | LR: 5.00e-05 +[2026-04-25 19:37:34] Epoch 1 | Step 1570 | Loss: 1.1859 | LR: 5.00e-05 +[2026-04-25 19:37:37] Epoch 1 | Step 1580 | Loss: 1.1856 | LR: 5.00e-05 +[2026-04-25 19:37:39] Epoch 1 | Step 1590 | Loss: 1.1859 | LR: 5.00e-05 +[2026-04-25 19:37:41] Epoch 1 | Step 1600 | Loss: 1.1862 | LR: 5.00e-05 +[2026-04-25 19:37:44] Epoch 1 | Step 1610 | Loss: 1.1852 | LR: 5.00e-05 +[2026-04-25 19:37:46] Epoch 1 | Step 1620 | Loss: 1.1843 | LR: 5.00e-05 +[2026-04-25 19:37:49] Epoch 1 | Step 1630 | Loss: 1.1851 | LR: 5.00e-05 +[2026-04-25 19:37:51] Epoch 1 | Step 1640 | Loss: 1.1852 | LR: 5.00e-05 +[2026-04-25 19:37:54] Epoch 1 | Step 1650 | Loss: 1.1847 | LR: 5.00e-05 +[2026-04-25 19:37:56] Epoch 1 | Step 1660 | Loss: 1.1842 | LR: 5.00e-05 +[2026-04-25 19:37:58] Epoch 1 | Step 1670 | Loss: 1.1854 | LR: 5.00e-05 +[2026-04-25 19:38:01] Epoch 1 | Step 1680 | Loss: 1.1857 | LR: 5.00e-05 +[2026-04-25 19:38:04] Epoch 1 | Step 1690 | Loss: 1.1855 | LR: 5.00e-05 
+[2026-04-25 19:38:06] Epoch 1 | Step 1700 | Loss: 1.1848 | LR: 5.00e-05 +[2026-04-25 19:38:09] Epoch 1 | Step 1710 | Loss: 1.1846 | LR: 5.00e-05 +[2026-04-25 19:38:11] Epoch 1 | Step 1720 | Loss: 1.1844 | LR: 5.00e-05 +[2026-04-25 19:38:14] Epoch 1 | Step 1730 | Loss: 1.1842 | LR: 5.00e-05 +[2026-04-25 19:38:16] Epoch 1 | Step 1740 | Loss: 1.1845 | LR: 5.00e-05 +[2026-04-25 19:38:19] Epoch 1 | Step 1750 | Loss: 1.1859 | LR: 5.00e-05 +[2026-04-25 19:38:21] Epoch 1 | Step 1760 | Loss: 1.1856 | LR: 5.00e-05 +[2026-04-25 19:38:24] Epoch 1 | Step 1770 | Loss: 1.1861 | LR: 5.00e-05 +[2026-04-25 19:38:26] Epoch 1 | Step 1780 | Loss: 1.1860 | LR: 5.00e-05 +[2026-04-25 19:38:29] Epoch 1 | Step 1790 | Loss: 1.1865 | LR: 5.00e-05 +[2026-04-25 19:38:31] Epoch 1 | Step 1800 | Loss: 1.1860 | LR: 5.00e-05 +[2026-04-25 19:38:34] Epoch 1 | Step 1810 | Loss: 1.1864 | LR: 5.00e-05 +[2026-04-25 19:38:37] Epoch 1 | Step 1820 | Loss: 1.1870 | LR: 5.00e-05 +[2026-04-25 19:38:39] Epoch 1 | Step 1830 | Loss: 1.1873 | LR: 5.00e-05 +[2026-04-25 19:38:42] Epoch 1 | Step 1840 | Loss: 1.1877 | LR: 5.00e-05 +[2026-04-25 19:38:44] Epoch 1 | Step 1850 | Loss: 1.1874 | LR: 5.00e-05 +[2026-04-25 19:38:47] Epoch 1 | Step 1860 | Loss: 1.1879 | LR: 5.00e-05 +[2026-04-25 19:38:49] Epoch 1 | Step 1870 | Loss: 1.1876 | LR: 5.00e-05 +[2026-04-25 19:38:52] Epoch 1 | Step 1880 | Loss: 1.1871 | LR: 5.00e-05 +[2026-04-25 19:38:55] Epoch 1 | Step 1890 | Loss: 1.1877 | LR: 5.00e-05 +[2026-04-25 19:38:58] Epoch 1 | Step 1900 | Loss: 1.1877 | LR: 5.00e-05 +[2026-04-25 19:39:00] Epoch 1 | Step 1910 | Loss: 1.1881 | LR: 5.00e-05 +[2026-04-25 19:39:02] Epoch 1 | Step 1920 | Loss: 1.1887 | LR: 5.00e-05 +[2026-04-25 19:39:05] Epoch 1 | Step 1930 | Loss: 1.1888 | LR: 5.00e-05 +[2026-04-25 19:39:08] Epoch 1 | Step 1940 | Loss: 1.1886 | LR: 5.00e-05 +[2026-04-25 19:39:10] Epoch 1 | Step 1950 | Loss: 1.1883 | LR: 5.00e-05 +[2026-04-25 19:39:13] Epoch 1 | Step 1960 | Loss: 1.1885 | LR: 5.00e-05 +[2026-04-25 19:39:15] Epoch 1 | Step 1970 | Loss: 1.1887 | LR: 5.00e-05 +[2026-04-25 19:39:17] Epoch 1 | Step 1980 | Loss: 1.1894 | LR: 5.00e-05 +[2026-04-25 19:39:20] Epoch 1 | Step 1990 | Loss: 1.1897 | LR: 5.00e-05 +[2026-04-25 19:39:22] Epoch 1 | Step 2000 | Loss: 1.1897 | LR: 5.00e-05 +[2026-04-25 19:39:23] Validation | Batch 10/84 | Loss: 1.1552 +[2026-04-25 19:39:23] Validation | Batch 20/84 | Loss: 1.1688 +[2026-04-25 19:39:24] Validation | Batch 30/84 | Loss: 1.2551 +[2026-04-25 19:39:24] Validation | Batch 40/84 | Loss: 1.2580 +[2026-04-25 19:39:24] Validation | Batch 50/84 | Loss: 1.2545 +[2026-04-25 19:39:25] Validation | Batch 60/84 | Loss: 1.2285 +[2026-04-25 19:39:26] Validation | Batch 70/84 | Loss: 1.2086 +[2026-04-25 19:39:26] Validation | Batch 80/84 | Loss: 1.2153 +[2026-04-25 19:39:26] Validation | Batch 84/84 | Loss: 1.2082 +[2026-04-25 19:39:27] Validation | Loss: 1.2082 | PPL: 3.43 | Time: 3.85s +[2026-04-25 19:39:29] New best model saved! 
Val loss: 1.2082 +[2026-04-25 19:39:31] Epoch 1 | Step 2010 | Loss: 1.1900 | LR: 5.00e-05 +[2026-04-25 19:39:34] Epoch 1 | Step 2020 | Loss: 1.1900 | LR: 5.00e-05 +[2026-04-25 19:39:36] Epoch 1 | Step 2030 | Loss: 1.1905 | LR: 5.00e-05 +[2026-04-25 19:39:39] Epoch 1 | Step 2040 | Loss: 1.1907 | LR: 5.00e-05 +[2026-04-25 19:39:41] Epoch 1 | Step 2050 | Loss: 1.1909 | LR: 5.00e-05 +[2026-04-25 19:39:44] Epoch 1 | Step 2060 | Loss: 1.1907 | LR: 5.00e-05 +[2026-04-25 19:39:47] Epoch 1 | Step 2070 | Loss: 1.1899 | LR: 5.00e-05 +[2026-04-25 19:39:49] Epoch 1 | Step 2080 | Loss: 1.1895 | LR: 5.00e-05 +[2026-04-25 19:39:51] Epoch 1 | Step 2090 | Loss: 1.1900 | LR: 5.00e-05 +[2026-04-25 19:39:54] Epoch 1 | Step 2100 | Loss: 1.1903 | LR: 5.00e-05 +[2026-04-25 19:39:57] Epoch 1 | Step 2110 | Loss: 1.1904 | LR: 5.00e-05 +[2026-04-25 19:39:59] Epoch 1 | Step 2120 | Loss: 1.1902 | LR: 5.00e-05 +[2026-04-25 19:40:02] Epoch 1 | Step 2130 | Loss: 1.1906 | LR: 5.00e-05 +[2026-04-25 19:40:04] Epoch 1 | Step 2140 | Loss: 1.1907 | LR: 5.00e-05 +[2026-04-25 19:40:07] Epoch 1 | Step 2150 | Loss: 1.1907 | LR: 5.00e-05 +[2026-04-25 19:40:09] Epoch 1 | Step 2160 | Loss: 1.1912 | LR: 5.00e-05 +[2026-04-25 19:40:12] Epoch 1 | Step 2170 | Loss: 1.1910 | LR: 5.00e-05 +[2026-04-25 19:40:14] Epoch 1 | Step 2180 | Loss: 1.1907 | LR: 5.00e-05 +[2026-04-25 19:40:16] Epoch 1 | Step 2190 | Loss: 1.1910 | LR: 5.00e-05 +[2026-04-25 19:40:19] Epoch 1 | Step 2200 | Loss: 1.1911 | LR: 5.00e-05 +[2026-04-25 19:40:21] Epoch 1 | Step 2210 | Loss: 1.1912 | LR: 5.00e-05 +[2026-04-25 19:40:24] Epoch 1 | Step 2220 | Loss: 1.1920 | LR: 5.00e-05 +[2026-04-25 19:40:26] Epoch 1 | Step 2230 | Loss: 1.1929 | LR: 5.00e-05 +[2026-04-25 19:40:29] Epoch 1 | Step 2240 | Loss: 1.1936 | LR: 5.00e-05 +[2026-04-25 19:40:31] Epoch 1 | Step 2250 | Loss: 1.1940 | LR: 5.00e-05 +[2026-04-25 19:40:34] Epoch 1 | Step 2260 | Loss: 1.1939 | LR: 5.00e-05 +[2026-04-25 19:40:37] Epoch 1 | Step 2270 | Loss: 1.1941 | LR: 5.00e-05 +[2026-04-25 19:40:39] Epoch 1 | Step 2280 | Loss: 1.1945 | LR: 5.00e-05 +[2026-04-25 19:40:42] Epoch 1 | Step 2290 | Loss: 1.1954 | LR: 5.00e-05 +[2026-04-25 19:40:44] Epoch 1 | Step 2300 | Loss: 1.1957 | LR: 5.00e-05 +[2026-04-25 19:40:46] Epoch 1 | Step 2310 | Loss: 1.1956 | LR: 5.00e-05 +[2026-04-25 19:40:49] Epoch 1 | Step 2320 | Loss: 1.1958 | LR: 5.00e-05 +[2026-04-25 19:40:51] Epoch 1 | Step 2330 | Loss: 1.1959 | LR: 5.00e-05 +[2026-04-25 19:40:54] Epoch 1 | Step 2340 | Loss: 1.1958 | LR: 5.00e-05 +[2026-04-25 19:40:56] Epoch 1 | Step 2350 | Loss: 1.1956 | LR: 5.00e-05 +[2026-04-25 19:40:59] Epoch 1 | Step 2360 | Loss: 1.1959 | LR: 5.00e-05 +[2026-04-25 19:41:01] Epoch 1 | Step 2370 | Loss: 1.1958 | LR: 5.00e-05 +[2026-04-25 19:41:04] Epoch 1 | Step 2380 | Loss: 1.1957 | LR: 5.00e-05 +[2026-04-25 19:41:06] Epoch 1 | Step 2390 | Loss: 1.1960 | LR: 5.00e-05 +[2026-04-25 19:41:09] Epoch 1 | Step 2400 | Loss: 1.1957 | LR: 5.00e-05 +[2026-04-25 19:41:11] Epoch 1 | Step 2410 | Loss: 1.1962 | LR: 5.00e-05 +[2026-04-25 19:41:14] Epoch 1 | Step 2420 | Loss: 1.1964 | LR: 5.00e-05 +[2026-04-25 19:41:16] Epoch 1 | Step 2430 | Loss: 1.1966 | LR: 5.00e-05 +[2026-04-25 19:41:19] Epoch 1 | Step 2440 | Loss: 1.1962 | LR: 5.00e-05 +[2026-04-25 19:41:22] Epoch 1 | Step 2450 | Loss: 1.1961 | LR: 5.00e-05 +[2026-04-25 19:41:24] Epoch 1 | Step 2460 | Loss: 1.1961 | LR: 5.00e-05 +[2026-04-25 19:41:27] Epoch 1 | Step 2470 | Loss: 1.1963 | LR: 5.00e-05 +[2026-04-25 19:41:29] Epoch 1 | Step 2480 | Loss: 1.1964 | LR: 5.00e-05 +[2026-04-25 19:41:32] Epoch 1 | 
Step 2490 | Loss: 1.1960 | LR: 5.00e-05 +[2026-04-25 19:41:35] Epoch 1 | Step 2500 | Loss: 1.1957 | LR: 5.00e-05 +[2026-04-25 19:41:37] Epoch 1 | Step 2510 | Loss: 1.1959 | LR: 5.00e-05 +[2026-04-25 19:41:39] Epoch 1 | Step 2520 | Loss: 1.1954 | LR: 5.00e-05 +[2026-04-25 19:41:42] Epoch 1 | Step 2530 | Loss: 1.1952 | LR: 5.00e-05 +[2026-04-25 19:41:44] Epoch 1 | Step 2540 | Loss: 1.1952 | LR: 5.00e-05 +[2026-04-25 19:41:47] Epoch 1 | Step 2550 | Loss: 1.1947 | LR: 5.00e-05 +[2026-04-25 19:41:50] Epoch 1 | Step 2560 | Loss: 1.1948 | LR: 5.00e-05 +[2026-04-25 19:41:52] Epoch 1 | Step 2570 | Loss: 1.1953 | LR: 5.00e-05 +[2026-04-25 19:41:55] Epoch 1 | Step 2580 | Loss: 1.1958 | LR: 5.00e-05 +[2026-04-25 19:41:58] Epoch 1 | Step 2590 | Loss: 1.1960 | LR: 5.00e-05 +[2026-04-25 19:42:00] Epoch 1 | Step 2600 | Loss: 1.1962 | LR: 5.00e-05 +[2026-04-25 19:42:02] Epoch 1 | Step 2610 | Loss: 1.1963 | LR: 5.00e-05 +[2026-04-25 19:42:05] Epoch 1 | Step 2620 | Loss: 1.1960 | LR: 5.00e-05 +[2026-04-25 19:42:07] Epoch 1 | Step 2630 | Loss: 1.1958 | LR: 5.00e-05 +[2026-04-25 19:42:10] Epoch 1 | Step 2640 | Loss: 1.1960 | LR: 5.00e-05 +[2026-04-25 19:42:13] Epoch 1 | Step 2650 | Loss: 1.1958 | LR: 5.00e-05 +[2026-04-25 19:42:15] Epoch 1 | Step 2660 | Loss: 1.1960 | LR: 5.00e-05 +[2026-04-25 19:42:18] Epoch 1 | Step 2670 | Loss: 1.1957 | LR: 5.00e-05 +[2026-04-25 19:42:20] Epoch 1 | Step 2680 | Loss: 1.1956 | LR: 5.00e-05 +[2026-04-25 19:42:23] Epoch 1 | Step 2690 | Loss: 1.1955 | LR: 5.00e-05 +[2026-04-25 19:42:25] Epoch 1 | Step 2700 | Loss: 1.1953 | LR: 5.00e-05 +[2026-04-25 19:42:28] Epoch 1 | Step 2710 | Loss: 1.1948 | LR: 5.00e-05 +[2026-04-25 19:42:31] Epoch 1 | Step 2720 | Loss: 1.1951 | LR: 5.00e-05 +[2026-04-25 19:42:33] Epoch 1 | Step 2730 | Loss: 1.1949 | LR: 5.00e-05 +[2026-04-25 19:42:35] Epoch 1 | Step 2740 | Loss: 1.1955 | LR: 5.00e-05 +[2026-04-25 19:42:38] Epoch 1 | Step 2750 | Loss: 1.1958 | LR: 5.00e-05 +[2026-04-25 19:42:40] Epoch 1 | Step 2760 | Loss: 1.1954 | LR: 5.00e-05 +[2026-04-25 19:42:43] Epoch 1 | Step 2770 | Loss: 1.1953 | LR: 5.00e-05 +[2026-04-25 19:42:45] Epoch 1 | Step 2780 | Loss: 1.1957 | LR: 5.00e-05 +[2026-04-25 19:42:48] Epoch 1 | Step 2790 | Loss: 1.1957 | LR: 5.00e-05 +[2026-04-25 19:42:50] Epoch 1 | Step 2800 | Loss: 1.1956 | LR: 5.00e-05 +[2026-04-25 19:42:53] Epoch 1 | Step 2810 | Loss: 1.1959 | LR: 5.00e-05 +[2026-04-25 19:42:55] Epoch 1 | Step 2820 | Loss: 1.1959 | LR: 5.00e-05 +[2026-04-25 19:42:58] Epoch 1 | Step 2830 | Loss: 1.1957 | LR: 5.00e-05 +[2026-04-25 19:43:00] Epoch 1 | Step 2840 | Loss: 1.1965 | LR: 5.00e-05 +[2026-04-25 19:43:03] Epoch 1 | Step 2850 | Loss: 1.1966 | LR: 5.00e-05 +[2026-04-25 19:43:05] Epoch 1 | Step 2860 | Loss: 1.1966 | LR: 5.00e-05 +[2026-04-25 19:43:08] Epoch 1 | Step 2870 | Loss: 1.1968 | LR: 5.00e-05 +[2026-04-25 19:43:11] Epoch 1 | Step 2880 | Loss: 1.1965 | LR: 5.00e-05 +[2026-04-25 19:43:13] Epoch 1 | Step 2890 | Loss: 1.1964 | LR: 5.00e-05 +[2026-04-25 19:43:16] Epoch 1 | Step 2900 | Loss: 1.1959 | LR: 5.00e-05 +[2026-04-25 19:43:18] Epoch 1 | Step 2910 | Loss: 1.1958 | LR: 5.00e-05 +[2026-04-25 19:43:21] Epoch 1 | Step 2920 | Loss: 1.1961 | LR: 5.00e-05 +[2026-04-25 19:43:24] Epoch 1 | Step 2930 | Loss: 1.1960 | LR: 5.00e-05 +[2026-04-25 19:43:27] Epoch 1 | Step 2940 | Loss: 1.1958 | LR: 5.00e-05 +[2026-04-25 19:43:29] Epoch 1 | Step 2950 | Loss: 1.1961 | LR: 5.00e-05 +[2026-04-25 19:43:32] Epoch 1 | Step 2960 | Loss: 1.1962 | LR: 5.00e-05 +[2026-04-25 19:43:34] Epoch 1 | Step 2970 | Loss: 1.1963 | LR: 5.00e-05 
+[2026-04-25 19:43:37] Epoch 1 | Step 2980 | Loss: 1.1962 | LR: 5.00e-05 +[2026-04-25 19:43:39] Epoch 1 | Step 2990 | Loss: 1.1965 | LR: 5.00e-05 +[2026-04-25 19:43:42] Epoch 1 | Step 3000 | Loss: 1.1964 | LR: 5.00e-05 +[2026-04-25 19:43:45] Epoch 1 | Step 3010 | Loss: 1.1965 | LR: 5.00e-05 +[2026-04-25 19:43:47] Epoch 1 | Step 3020 | Loss: 1.1962 | LR: 5.00e-05 +[2026-04-25 19:43:50] Epoch 1 | Step 3030 | Loss: 1.1961 | LR: 5.00e-05 +[2026-04-25 19:43:52] Epoch 1 | Step 3040 | Loss: 1.1955 | LR: 5.00e-05 +[2026-04-25 19:43:55] Epoch 1 | Step 3050 | Loss: 1.1951 | LR: 5.00e-05 +[2026-04-25 19:43:57] Epoch 1 | Step 3060 | Loss: 1.1952 | LR: 5.00e-05 +[2026-04-25 19:44:00] Epoch 1 | Step 3070 | Loss: 1.1950 | LR: 5.00e-05 +[2026-04-25 19:44:03] Epoch 1 | Step 3080 | Loss: 1.1951 | LR: 5.00e-05 +[2026-04-25 19:44:05] Epoch 1 | Step 3090 | Loss: 1.1949 | LR: 5.00e-05 +[2026-04-25 19:44:07] Epoch 1 | Step 3100 | Loss: 1.1948 | LR: 5.00e-05 +[2026-04-25 19:44:10] Epoch 1 | Step 3110 | Loss: 1.1946 | LR: 5.00e-05 +[2026-04-25 19:44:12] Epoch 1 | Step 3120 | Loss: 1.1952 | LR: 5.00e-05 +[2026-04-25 19:44:15] Epoch 1 | Step 3130 | Loss: 1.1949 | LR: 5.00e-05 +[2026-04-25 19:44:18] Epoch 1 | Step 3140 | Loss: 1.1951 | LR: 5.00e-05 +[2026-04-25 19:44:20] Epoch 1 | Step 3150 | Loss: 1.1954 | LR: 5.00e-05 +[2026-04-25 19:44:23] Epoch 1 | Step 3160 | Loss: 1.1954 | LR: 5.00e-05 +[2026-04-25 19:44:25] Epoch 1 | Step 3170 | Loss: 1.1955 | LR: 5.00e-05 +[2026-04-25 19:44:28] Epoch 1 | Step 3180 | Loss: 1.1956 | LR: 5.00e-05 +[2026-04-25 19:44:30] Epoch 1 | Step 3190 | Loss: 1.1952 | LR: 5.00e-05 +[2026-04-25 19:44:33] Epoch 1 | Step 3200 | Loss: 1.1952 | LR: 5.00e-05 +[2026-04-25 19:44:35] Epoch 1 | Step 3210 | Loss: 1.1951 | LR: 5.00e-05 +[2026-04-25 19:44:38] Epoch 1 | Step 3220 | Loss: 1.1946 | LR: 5.00e-05 +[2026-04-25 19:44:40] Epoch 1 | Step 3230 | Loss: 1.1951 | LR: 5.00e-05 +[2026-04-25 19:44:43] Epoch 1 | Step 3240 | Loss: 1.1950 | LR: 5.00e-05 +[2026-04-25 19:44:45] Epoch 1 | Step 3250 | Loss: 1.1951 | LR: 5.00e-05 +[2026-04-25 19:44:48] Epoch 1 | Step 3260 | Loss: 1.1953 | LR: 5.00e-05 +[2026-04-25 19:44:50] Epoch 1 | Step 3270 | Loss: 1.1952 | LR: 5.00e-05 +[2026-04-25 19:44:53] Epoch 1 | Step 3280 | Loss: 1.1948 | LR: 5.00e-05 +[2026-04-25 19:44:55] Epoch 1 | Step 3290 | Loss: 1.1948 | LR: 5.00e-05 +[2026-04-25 19:44:58] Epoch 1 | Step 3300 | Loss: 1.1950 | LR: 5.00e-05 +[2026-04-25 19:45:00] Epoch 1 | Step 3310 | Loss: 1.1949 | LR: 5.00e-05 +[2026-04-25 19:45:03] Epoch 1 | Step 3320 | Loss: 1.1951 | LR: 5.00e-05 +[2026-04-25 19:45:06] Epoch 1 | Step 3330 | Loss: 1.1950 | LR: 5.00e-05 +[2026-04-25 19:45:09] Epoch 1 | Step 3340 | Loss: 1.1951 | LR: 5.00e-05 +[2026-04-25 19:45:11] Epoch 1 | Step 3350 | Loss: 1.1948 | LR: 5.00e-05 +[2026-04-25 19:45:13] Epoch 1 | Step 3360 | Loss: 1.1946 | LR: 5.00e-05 +[2026-04-25 19:45:16] Epoch 1 | Step 3370 | Loss: 1.1949 | LR: 5.00e-05 +[2026-04-25 19:45:19] Epoch 1 | Step 3380 | Loss: 1.1946 | LR: 5.00e-05 +[2026-04-25 19:45:22] Epoch 1 | Step 3390 | Loss: 1.1949 | LR: 5.00e-05 +[2026-04-25 19:45:24] Epoch 1 | Step 3400 | Loss: 1.1954 | LR: 5.00e-05 +[2026-04-25 19:45:27] Epoch 1 | Step 3410 | Loss: 1.1952 | LR: 5.00e-05 +[2026-04-25 19:45:29] Epoch 1 | Step 3420 | Loss: 1.1949 | LR: 5.00e-05 +[2026-04-25 19:45:32] Epoch 1 | Step 3430 | Loss: 1.1949 | LR: 5.00e-05 +[2026-04-25 19:45:35] Epoch 1 | Step 3440 | Loss: 1.1951 | LR: 5.00e-05 +[2026-04-25 19:45:37] Epoch 1 | Step 3450 | Loss: 1.1950 | LR: 5.00e-05 +[2026-04-25 19:45:40] Epoch 1 | Step 3460 | Loss: 
1.1949 | LR: 5.00e-05 +[2026-04-25 19:45:42] Epoch 1 | Step 3470 | Loss: 1.1949 | LR: 5.00e-05 +[2026-04-25 19:45:45] Epoch 1 | Step 3480 | Loss: 1.1949 | LR: 5.00e-05 +[2026-04-25 19:45:47] Epoch 1 | Step 3490 | Loss: 1.1947 | LR: 5.00e-05 +[2026-04-25 19:45:50] Epoch 1 | Step 3500 | Loss: 1.1943 | LR: 5.00e-05 +[2026-04-25 19:45:53] Epoch 1 | Step 3510 | Loss: 1.1947 | LR: 5.00e-05 +[2026-04-25 19:45:55] Epoch 1 | Step 3520 | Loss: 1.1944 | LR: 5.00e-05 +[2026-04-25 19:45:58] Epoch 1 | Step 3530 | Loss: 1.1947 | LR: 5.00e-05 +[2026-04-25 19:46:00] Epoch 1 | Step 3540 | Loss: 1.1944 | LR: 5.00e-05 +[2026-04-25 19:46:03] Epoch 1 | Step 3550 | Loss: 1.1944 | LR: 5.00e-05 +[2026-04-25 19:46:05] Epoch 1 | Step 3560 | Loss: 1.1944 | LR: 5.00e-05 +[2026-04-25 19:46:08] Epoch 1 | Step 3570 | Loss: 1.1943 | LR: 5.00e-05 +[2026-04-25 19:46:11] Epoch 1 | Step 3580 | Loss: 1.1942 | LR: 5.00e-05 +[2026-04-25 19:46:13] Epoch 1 | Step 3590 | Loss: 1.1942 | LR: 5.00e-05 +[2026-04-25 19:46:16] Epoch 1 | Step 3600 | Loss: 1.1940 | LR: 5.00e-05 +[2026-04-25 19:46:18] Epoch 1 | Step 3610 | Loss: 1.1939 | LR: 5.00e-05 +[2026-04-25 19:46:21] Epoch 1 | Step 3620 | Loss: 1.1938 | LR: 5.00e-05 +[2026-04-25 19:46:23] Epoch 1 | Step 3630 | Loss: 1.1942 | LR: 5.00e-05 +[2026-04-25 19:46:26] Epoch 1 | Step 3640 | Loss: 1.1945 | LR: 5.00e-05 +[2026-04-25 19:46:28] Epoch 1 | Step 3650 | Loss: 1.1946 | LR: 5.00e-05 +[2026-04-25 19:46:31] Epoch 1 | Step 3660 | Loss: 1.1944 | LR: 5.00e-05 +[2026-04-25 19:46:33] Epoch 1 | Step 3670 | Loss: 1.1942 | LR: 5.00e-05 +[2026-04-25 19:46:36] Epoch 1 | Step 3680 | Loss: 1.1943 | LR: 5.00e-05 +[2026-04-25 19:46:38] Epoch 1 | Step 3690 | Loss: 1.1941 | LR: 5.00e-05 +[2026-04-25 19:46:41] Epoch 1 | Step 3700 | Loss: 1.1939 | LR: 5.00e-05 +[2026-04-25 19:46:43] Epoch 1 | Step 3710 | Loss: 1.1939 | LR: 5.00e-05 +[2026-04-25 19:46:46] Epoch 1 | Step 3720 | Loss: 1.1939 | LR: 5.00e-05 +[2026-04-25 19:46:48] Epoch 1 | Step 3730 | Loss: 1.1941 | LR: 5.00e-05 +[2026-04-25 19:46:51] Epoch 1 | Step 3740 | Loss: 1.1942 | LR: 5.00e-05 +[2026-04-25 19:46:53] Epoch 1 | Step 3750 | Loss: 1.1940 | LR: 5.00e-05 +[2026-04-25 19:46:56] Epoch 1 | Step 3760 | Loss: 1.1942 | LR: 5.00e-05 +[2026-04-25 19:46:59] Epoch 1 | Step 3770 | Loss: 1.1944 | LR: 5.00e-05 +[2026-04-25 19:47:01] Epoch 1 | Step 3780 | Loss: 1.1945 | LR: 5.00e-05 +[2026-04-25 19:47:04] Epoch 1 | Step 3790 | Loss: 1.1946 | LR: 5.00e-05 +[2026-04-25 19:47:06] Epoch 1 | Step 3800 | Loss: 1.1948 | LR: 5.00e-05 +[2026-04-25 19:47:09] Epoch 1 | Step 3810 | Loss: 1.1942 | LR: 5.00e-05 +[2026-04-25 19:47:11] Epoch 1 | Step 3820 | Loss: 1.1940 | LR: 5.00e-05 +[2026-04-25 19:47:14] Epoch 1 | Step 3830 | Loss: 1.1939 | LR: 5.00e-05 +[2026-04-25 19:47:16] Epoch 1 | Step 3840 | Loss: 1.1941 | LR: 5.00e-05 +[2026-04-25 19:47:19] Epoch 1 | Step 3850 | Loss: 1.1938 | LR: 5.00e-05 +[2026-04-25 19:47:21] Epoch 1 | Step 3860 | Loss: 1.1937 | LR: 5.00e-05 +[2026-04-25 19:47:24] Epoch 1 | Step 3870 | Loss: 1.1936 | LR: 5.00e-05 +[2026-04-25 19:47:27] Epoch 1 | Step 3880 | Loss: 1.1932 | LR: 5.00e-05 +[2026-04-25 19:47:29] Epoch 1 | Step 3890 | Loss: 1.1931 | LR: 5.00e-05 +[2026-04-25 19:47:32] Epoch 1 | Step 3900 | Loss: 1.1932 | LR: 5.00e-05 +[2026-04-25 19:47:34] Epoch 1 | Step 3910 | Loss: 1.1934 | LR: 5.00e-05 +[2026-04-25 19:47:37] Epoch 1 | Step 3920 | Loss: 1.1936 | LR: 5.00e-05 +[2026-04-25 19:47:39] Epoch 1 | Step 3930 | Loss: 1.1935 | LR: 5.00e-05 +[2026-04-25 19:47:42] Epoch 1 | Step 3940 | Loss: 1.1935 | LR: 5.00e-05 +[2026-04-25 19:47:45] Epoch 
1 | Step 3950 | Loss: 1.1933 | LR: 5.00e-05 +[2026-04-25 19:47:47] Epoch 1 | Step 3960 | Loss: 1.1932 | LR: 5.00e-05 +[2026-04-25 19:47:50] Epoch 1 | Step 3970 | Loss: 1.1931 | LR: 5.00e-05 +[2026-04-25 19:47:53] Epoch 1 | Step 3980 | Loss: 1.1932 | LR: 4.99e-05 +[2026-04-25 19:47:55] Epoch 1 | Step 3990 | Loss: 1.1930 | LR: 4.99e-05 +[2026-04-25 19:47:57] Epoch 1 | Step 4000 | Loss: 1.1930 | LR: 4.98e-05 +[2026-04-25 19:47:58] Validation | Batch 10/84 | Loss: 1.1351 +[2026-04-25 19:47:58] Validation | Batch 20/84 | Loss: 1.1323 +[2026-04-25 19:47:59] Validation | Batch 30/84 | Loss: 1.2164 +[2026-04-25 19:47:59] Validation | Batch 40/84 | Loss: 1.2215 +[2026-04-25 19:48:00] Validation | Batch 50/84 | Loss: 1.2167 +[2026-04-25 19:48:00] Validation | Batch 60/84 | Loss: 1.1898 +[2026-04-25 19:48:01] Validation | Batch 70/84 | Loss: 1.1702 +[2026-04-25 19:48:01] Validation | Batch 80/84 | Loss: 1.1787 +[2026-04-25 19:48:01] Validation | Batch 84/84 | Loss: 1.1692 +[2026-04-25 19:48:02] Validation | Loss: 1.1692 | PPL: 3.30 | Time: 3.78s +[2026-04-25 19:48:04] New best model saved! Val loss: 1.1692 +[2026-04-25 19:48:06] Epoch 1 | Step 4010 | Loss: 1.1929 | LR: 4.97e-05 +[2026-04-25 19:48:09] Epoch 1 | Step 4020 | Loss: 1.1930 | LR: 4.95e-05 +[2026-04-25 19:48:11] Epoch 1 | Step 4030 | Loss: 1.1927 | LR: 4.94e-05 +[2026-04-25 19:48:14] Epoch 1 | Step 4040 | Loss: 1.1924 | LR: 4.92e-05 +[2026-04-25 19:48:16] Epoch 1 | Step 4050 | Loss: 1.1924 | LR: 4.90e-05 +[2026-04-25 19:48:18] Epoch 1 | Step 4060 | Loss: 1.1919 | LR: 4.88e-05 +[2026-04-25 19:48:21] Epoch 1 | Step 4070 | Loss: 1.1920 | LR: 4.85e-05 +[2026-04-25 19:48:23] Epoch 1 | Step 4080 | Loss: 1.1921 | LR: 4.82e-05 +[2026-04-25 19:48:26] Epoch 1 | Step 4090 | Loss: 1.1921 | LR: 4.80e-05 +[2026-04-25 19:48:28] Epoch 1 | Step 4100 | Loss: 1.1923 | LR: 4.77e-05 +[2026-04-25 19:48:31] Epoch 1 | Step 4110 | Loss: 1.1922 | LR: 4.73e-05 +[2026-04-25 19:48:34] Epoch 1 | Step 4120 | Loss: 1.1924 | LR: 4.70e-05 +[2026-04-25 19:48:36] Epoch 1 | Step 4130 | Loss: 1.1922 | LR: 4.66e-05 +[2026-04-25 19:48:39] Epoch 1 | Step 4140 | Loss: 1.1923 | LR: 4.62e-05 +[2026-04-25 19:48:42] Epoch 1 | Step 4150 | Loss: 1.1929 | LR: 4.58e-05 +[2026-04-25 19:48:44] Epoch 1 | Step 4160 | Loss: 1.1932 | LR: 4.54e-05 +[2026-04-25 19:48:47] Epoch 1 | Step 4170 | Loss: 1.1931 | LR: 4.49e-05 +[2026-04-25 19:48:50] Epoch 1 | Step 4180 | Loss: 1.1931 | LR: 4.45e-05 +[2026-04-25 19:48:52] Epoch 1 | Step 4190 | Loss: 1.1930 | LR: 4.40e-05 +[2026-04-25 19:48:55] Epoch 1 | Step 4200 | Loss: 1.1934 | LR: 4.35e-05 +[2026-04-25 19:48:57] Epoch 1 | Step 4210 | Loss: 1.1932 | LR: 4.30e-05 +[2026-04-25 19:49:00] Epoch 1 | Step 4220 | Loss: 1.1937 | LR: 4.25e-05 +[2026-04-25 19:49:03] Epoch 1 | Step 4230 | Loss: 1.1937 | LR: 4.19e-05 +[2026-04-25 19:49:06] Epoch 1 | Step 4240 | Loss: 1.1939 | LR: 4.14e-05 +[2026-04-25 19:49:08] Epoch 1 | Step 4250 | Loss: 1.1939 | LR: 4.08e-05 +[2026-04-25 19:49:11] Epoch 1 | Step 4260 | Loss: 1.1935 | LR: 4.02e-05 +[2026-04-25 19:49:13] Epoch 1 | Step 4270 | Loss: 1.1937 | LR: 3.96e-05 +[2026-04-25 19:49:16] Epoch 1 | Step 4280 | Loss: 1.1936 | LR: 3.90e-05 +[2026-04-25 19:49:18] Epoch 1 | Step 4290 | Loss: 1.1933 | LR: 3.84e-05 +[2026-04-25 19:49:21] Epoch 1 | Step 4300 | Loss: 1.1932 | LR: 3.78e-05 +[2026-04-25 19:49:23] Epoch 1 | Step 4310 | Loss: 1.1935 | LR: 3.71e-05 +[2026-04-25 19:49:26] Epoch 1 | Step 4320 | Loss: 1.1936 | LR: 3.65e-05 +[2026-04-25 19:49:28] Epoch 1 | Step 4330 | Loss: 1.1934 | LR: 3.58e-05 +[2026-04-25 19:49:31] Epoch 1 | 
Step 4340 | Loss: 1.1932 | LR: 3.52e-05 +[2026-04-25 19:49:33] Epoch 1 | Step 4350 | Loss: 1.1929 | LR: 3.45e-05 +[2026-04-25 19:49:36] Epoch 1 | Step 4360 | Loss: 1.1928 | LR: 3.38e-05 +[2026-04-25 19:49:38] Epoch 1 | Step 4370 | Loss: 1.1928 | LR: 3.31e-05 +[2026-04-25 19:49:41] Epoch 1 | Step 4380 | Loss: 1.1927 | LR: 3.24e-05 +[2026-04-25 19:49:43] Epoch 1 | Step 4390 | Loss: 1.1927 | LR: 3.17e-05 +[2026-04-25 19:49:46] Epoch 1 | Step 4400 | Loss: 1.1924 | LR: 3.10e-05 +[2026-04-25 19:49:48] Epoch 1 | Step 4410 | Loss: 1.1919 | LR: 3.03e-05 +[2026-04-25 19:49:51] Epoch 1 | Step 4420 | Loss: 1.1922 | LR: 2.96e-05 +[2026-04-25 19:49:53] Epoch 1 | Step 4430 | Loss: 1.1920 | LR: 2.89e-05 +[2026-04-25 19:49:56] Epoch 1 | Step 4440 | Loss: 1.1923 | LR: 2.82e-05 +[2026-04-25 19:49:59] Epoch 1 | Step 4450 | Loss: 1.1921 | LR: 2.74e-05 +[2026-04-25 19:50:01] Epoch 1 | Step 4460 | Loss: 1.1924 | LR: 2.67e-05 +[2026-04-25 19:50:04] Epoch 1 | Step 4470 | Loss: 1.1921 | LR: 2.60e-05 +[2026-04-25 19:50:06] Epoch 1 | Step 4480 | Loss: 1.1918 | LR: 2.53e-05 +[2026-04-25 19:50:09] Epoch 1 | Step 4490 | Loss: 1.1916 | LR: 2.46e-05 +[2026-04-25 19:50:12] Epoch 1 | Step 4500 | Loss: 1.1916 | LR: 2.39e-05 +[2026-04-25 19:50:14] Epoch 1 | Step 4510 | Loss: 1.1911 | LR: 2.32e-05 +[2026-04-25 19:50:16] Epoch 1 | Step 4520 | Loss: 1.1908 | LR: 2.25e-05 +[2026-04-25 19:50:20] Epoch 1 | Step 4530 | Loss: 1.1904 | LR: 2.18e-05 +[2026-04-25 19:50:22] Epoch 1 | Step 4540 | Loss: 1.1902 | LR: 2.11e-05 +[2026-04-25 19:50:25] Epoch 1 | Step 4550 | Loss: 1.1898 | LR: 2.04e-05 +[2026-04-25 19:50:28] Epoch 1 | Step 4560 | Loss: 1.1897 | LR: 1.97e-05 +[2026-04-25 19:50:30] Epoch 1 | Step 4570 | Loss: 1.1896 | LR: 1.91e-05 +[2026-04-25 19:50:32] Epoch 1 | Step 4580 | Loss: 1.1894 | LR: 1.84e-05 +[2026-04-25 19:50:35] Epoch 1 | Step 4590 | Loss: 1.1892 | LR: 1.78e-05 +[2026-04-25 19:50:37] Epoch 1 | Step 4600 | Loss: 1.1889 | LR: 1.71e-05 +[2026-04-25 19:50:40] Epoch 1 | Step 4610 | Loss: 1.1886 | LR: 1.65e-05 +[2026-04-25 19:50:42] Epoch 1 | Step 4620 | Loss: 1.1886 | LR: 1.59e-05 +[2026-04-25 19:50:45] Epoch 1 | Step 4630 | Loss: 1.1884 | LR: 1.53e-05 +[2026-04-25 19:50:47] Epoch 1 | Step 4640 | Loss: 1.1883 | LR: 1.47e-05 +[2026-04-25 19:50:50] Epoch 1 | Step 4650 | Loss: 1.1882 | LR: 1.41e-05 +[2026-04-25 19:50:52] Epoch 1 | Step 4660 | Loss: 1.1880 | LR: 1.35e-05 +[2026-04-25 19:50:55] Epoch 1 | Step 4670 | Loss: 1.1877 | LR: 1.30e-05 +[2026-04-25 19:50:58] Epoch 1 | Step 4680 | Loss: 1.1877 | LR: 1.24e-05 +[2026-04-25 19:51:00] Epoch 1 | Step 4690 | Loss: 1.1874 | LR: 1.19e-05 +[2026-04-25 19:51:03] Epoch 1 | Step 4700 | Loss: 1.1875 | LR: 1.14e-05 +[2026-04-25 19:51:05] Epoch 1 | Step 4710 | Loss: 1.1872 | LR: 1.09e-05 +[2026-04-25 19:51:08] Epoch 1 | Step 4720 | Loss: 1.1871 | LR: 1.04e-05 +[2026-04-25 19:51:10] Epoch 1 | Step 4730 | Loss: 1.1870 | LR: 9.98e-06 +[2026-04-25 19:51:12] Epoch 1 | Step 4740 | Loss: 1.1867 | LR: 9.54e-06 +[2026-04-25 19:51:15] Epoch 1 | Step 4750 | Loss: 1.1866 | LR: 9.12e-06 +[2026-04-25 19:51:17] Epoch 1 | Step 4760 | Loss: 1.1863 | LR: 8.72e-06 +[2026-04-25 19:51:20] Epoch 1 | Step 4770 | Loss: 1.1858 | LR: 8.33e-06 +[2026-04-25 19:51:22] Epoch 1 | Step 4780 | Loss: 1.1859 | LR: 7.97e-06 +[2026-04-25 19:51:25] Epoch 1 | Step 4790 | Loss: 1.1858 | LR: 7.62e-06 +[2026-04-25 19:51:28] Epoch 1 | Step 4800 | Loss: 1.1854 | LR: 7.30e-06 +[2026-04-25 19:51:30] Epoch 1 | Step 4810 | Loss: 1.1849 | LR: 6.99e-06 +[2026-04-25 19:51:33] Epoch 1 | Step 4820 | Loss: 1.1846 | LR: 6.71e-06 
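The LR column above traces the scheduler for this run: the rate is held flat at 5.00e-05 for most of the epoch, begins to decay shortly before step 4000, and, as the entries that follow show, settles onto a 5.00e-06 floor around step 4940. Below is a minimal sketch of a constant-then-cosine-decay schedule that reproduces this shape; the step boundaries and the cosine form are read off the logged values and are assumptions for illustration, not the project's scheduler code.

import math

def lr_at(step, base_lr=5e-5, min_lr=5e-6, decay_start=3980, decay_end=4940):
    """Hold base_lr, then cosine-decay to min_lr over [decay_start, decay_end)."""
    if step < decay_start:          # stable phase: constant base LR
        return base_lr
    if step >= decay_end:           # post-decay floor
        return min_lr
    t = (step - decay_start) / (decay_end - decay_start)
    return min_lr + (base_lr - min_lr) * 0.5 * (1.0 + math.cos(math.pi * t))

# The printed values roughly track the logged LR column; small mismatches
# suggest the real decay window or curve differs slightly from the assumptions here.
for s in (2000, 4210, 4450, 4730, 5200):
    print(s, f"{lr_at(s):.2e}")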
+[2026-04-25 19:51:35] Epoch 1 | Step 4830 | Loss: 1.1842 | LR: 6.45e-06 +[2026-04-25 19:51:38] Epoch 1 | Step 4840 | Loss: 1.1840 | LR: 6.21e-06 +[2026-04-25 19:51:41] Epoch 1 | Step 4850 | Loss: 1.1842 | LR: 5.99e-06 +[2026-04-25 19:51:43] Epoch 1 | Step 4860 | Loss: 1.1842 | LR: 5.79e-06 +[2026-04-25 19:51:46] Epoch 1 | Step 4870 | Loss: 1.1843 | LR: 5.61e-06 +[2026-04-25 19:51:48] Epoch 1 | Step 4880 | Loss: 1.1840 | LR: 5.46e-06 +[2026-04-25 19:51:51] Epoch 1 | Step 4890 | Loss: 1.1837 | LR: 5.32e-06 +[2026-04-25 19:51:54] Epoch 1 | Step 4900 | Loss: 1.1836 | LR: 5.21e-06 +[2026-04-25 19:51:56] Epoch 1 | Step 4910 | Loss: 1.1834 | LR: 5.13e-06 +[2026-04-25 19:51:59] Epoch 1 | Step 4920 | Loss: 1.1832 | LR: 5.06e-06 +[2026-04-25 19:52:01] Epoch 1 | Step 4930 | Loss: 1.1830 | LR: 5.02e-06 +[2026-04-25 19:52:04] Epoch 1 | Step 4940 | Loss: 1.1829 | LR: 5.00e-06 +[2026-04-25 19:52:06] Epoch 1 | Step 4950 | Loss: 1.1828 | LR: 5.00e-06 +[2026-04-25 19:52:09] Epoch 1 | Step 4960 | Loss: 1.1828 | LR: 5.00e-06 +[2026-04-25 19:52:11] Epoch 1 | Step 4970 | Loss: 1.1825 | LR: 5.00e-06 +[2026-04-25 19:52:14] Epoch 1 | Step 4980 | Loss: 1.1824 | LR: 5.00e-06 +[2026-04-25 19:52:16] Epoch 1 | Step 4990 | Loss: 1.1820 | LR: 5.00e-06 +[2026-04-25 19:52:19] Epoch 1 | Step 5000 | Loss: 1.1821 | LR: 5.00e-06 +[2026-04-25 19:52:21] Epoch 1 | Step 5010 | Loss: 1.1819 | LR: 5.00e-06 +[2026-04-25 19:52:24] Epoch 1 | Step 5020 | Loss: 1.1816 | LR: 5.00e-06 +[2026-04-25 19:52:26] Epoch 1 | Step 5030 | Loss: 1.1814 | LR: 5.00e-06 +[2026-04-25 19:52:28] Epoch 1 | Step 5040 | Loss: 1.1811 | LR: 5.00e-06 +[2026-04-25 19:52:31] Epoch 1 | Step 5050 | Loss: 1.1809 | LR: 5.00e-06 +[2026-04-25 19:52:33] Epoch 1 | Step 5060 | Loss: 1.1807 | LR: 5.00e-06 +[2026-04-25 19:52:36] Epoch 1 | Step 5070 | Loss: 1.1805 | LR: 5.00e-06 +[2026-04-25 19:52:38] Epoch 1 | Step 5080 | Loss: 1.1805 | LR: 5.00e-06 +[2026-04-25 19:52:41] Epoch 1 | Step 5090 | Loss: 1.1804 | LR: 5.00e-06 +[2026-04-25 19:52:44] Epoch 1 | Step 5100 | Loss: 1.1800 | LR: 5.00e-06 +[2026-04-25 19:52:46] Epoch 1 | Step 5110 | Loss: 1.1798 | LR: 5.00e-06 +[2026-04-25 19:52:49] Epoch 1 | Step 5120 | Loss: 1.1799 | LR: 5.00e-06 +[2026-04-25 19:52:52] Epoch 1 | Step 5130 | Loss: 1.1796 | LR: 5.00e-06 +[2026-04-25 19:52:54] Epoch 1 | Step 5140 | Loss: 1.1794 | LR: 5.00e-06 +[2026-04-25 19:52:57] Epoch 1 | Step 5150 | Loss: 1.1791 | LR: 5.00e-06 +[2026-04-25 19:52:59] Epoch 1 | Step 5160 | Loss: 1.1786 | LR: 5.00e-06 +[2026-04-25 19:53:02] Epoch 1 | Step 5170 | Loss: 1.1785 | LR: 5.00e-06 +[2026-04-25 19:53:04] Epoch 1 | Step 5180 | Loss: 1.1783 | LR: 5.00e-06 +[2026-04-25 19:53:07] Epoch 1 | Step 5190 | Loss: 1.1783 | LR: 5.00e-06 +[2026-04-25 19:53:10] Epoch 1 | Step 5200 | Loss: 1.1782 | LR: 5.00e-06 +[2026-04-25 19:53:12] Epoch 1 | Step 5210 | Loss: 1.1780 | LR: 5.00e-06 +[2026-04-25 19:53:15] Epoch 1 | Step 5220 | Loss: 1.1779 | LR: 5.00e-06 +[2026-04-25 19:53:18] Epoch 1 | Step 5230 | Loss: 1.1778 | LR: 5.00e-06 +[2026-04-25 19:53:20] Epoch 1 | Step 5240 | Loss: 1.1777 | LR: 5.00e-06 +[2026-04-25 19:53:23] Epoch 1 | Step 5250 | Loss: 1.1777 | LR: 5.00e-06 +[2026-04-25 19:53:25] Epoch 1 | Step 5260 | Loss: 1.1775 | LR: 5.00e-06 +[2026-04-25 19:53:28] Epoch 1 | Step 5270 | Loss: 1.1773 | LR: 5.00e-06 +[2026-04-25 19:53:31] Epoch 1 | Step 5280 | Loss: 1.1770 | LR: 5.00e-06 +[2026-04-25 19:53:33] Epoch 1 | Step 5290 | Loss: 1.1766 | LR: 5.00e-06 +[2026-04-25 19:53:36] Epoch 1 | Step 5300 | Loss: 1.1764 | LR: 5.00e-06 +[2026-04-25 19:53:38] Epoch 1 | Step 5310 | Loss: 
1.1764 | LR: 5.00e-06 +[2026-04-25 19:53:41] Epoch 1 | Step 5320 | Loss: 1.1761 | LR: 5.00e-06 +[2026-04-25 19:53:43] Epoch 1 | Step 5330 | Loss: 1.1760 | LR: 5.00e-06 +[2026-04-25 19:53:46] Epoch 1 | Step 5340 | Loss: 1.1758 | LR: 5.00e-06 +[2026-04-25 19:53:48] Epoch 1 | Step 5350 | Loss: 1.1756 | LR: 5.00e-06 +[2026-04-25 19:53:51] Epoch 1 | Step 5360 | Loss: 1.1756 | LR: 5.00e-06 +[2026-04-25 19:53:53] Epoch 1 | Step 5370 | Loss: 1.1754 | LR: 5.00e-06 +[2026-04-25 19:53:55] Epoch 1 | Step 5380 | Loss: 1.1752 | LR: 5.00e-06 +[2026-04-25 19:53:58] Epoch 1 | Step 5390 | Loss: 1.1749 | LR: 5.00e-06 +[2026-04-25 19:54:01] Epoch 1 | Step 5400 | Loss: 1.1746 | LR: 5.00e-06 +[2026-04-25 19:54:03] Epoch 1 | Step 5410 | Loss: 1.1745 | LR: 5.00e-06 +[2026-04-25 19:54:06] Epoch 1 | Step 5420 | Loss: 1.1742 | LR: 5.00e-06 +[2026-04-25 19:54:08] Epoch 1 | Step 5430 | Loss: 1.1741 | LR: 5.00e-06 +[2026-04-25 19:54:11] Epoch 1 | Step 5440 | Loss: 1.1741 | LR: 5.00e-06 +[2026-04-25 19:54:13] Epoch 1 | Step 5450 | Loss: 1.1742 | LR: 5.00e-06 +[2026-04-25 19:54:16] Epoch 1 | Step 5460 | Loss: 1.1739 | LR: 5.00e-06 +[2026-04-25 19:54:18] Epoch 1 | Step 5470 | Loss: 1.1736 | LR: 5.00e-06 +[2026-04-25 19:54:21] Epoch 1 | Step 5480 | Loss: 1.1736 | LR: 5.00e-06 +[2026-04-25 19:54:23] Epoch 1 | Step 5490 | Loss: 1.1735 | LR: 5.00e-06 +[2026-04-25 19:54:26] Epoch 1 | Step 5500 | Loss: 1.1734 | LR: 5.00e-06 +[2026-04-25 19:54:28] Epoch 1 | Step 5510 | Loss: 1.1735 | LR: 5.00e-06 +[2026-04-25 19:54:31] Epoch 1 | Step 5520 | Loss: 1.1733 | LR: 5.00e-06 +[2026-04-25 19:54:33] Epoch 1 | Step 5530 | Loss: 1.1732 | LR: 5.00e-06 +[2026-04-25 19:54:36] Epoch 1 | Step 5540 | Loss: 1.1727 | LR: 5.00e-06 +[2026-04-25 19:54:38] Epoch 1 | Step 5550 | Loss: 1.1726 | LR: 5.00e-06 +[2026-04-25 19:54:41] Epoch 1 | Step 5560 | Loss: 1.1723 | LR: 5.00e-06 +[2026-04-25 19:54:44] Epoch 1 | Step 5570 | Loss: 1.1724 | LR: 5.00e-06 +[2026-04-25 19:54:46] Epoch 1 | Step 5580 | Loss: 1.1721 | LR: 5.00e-06 +[2026-04-25 19:54:49] Epoch 1 | Step 5590 | Loss: 1.1718 | LR: 5.00e-06 +[2026-04-25 19:54:51] Epoch 1 | Step 5600 | Loss: 1.1719 | LR: 5.00e-06 +[2026-04-25 19:54:54] Epoch 1 | Step 5610 | Loss: 1.1719 | LR: 5.00e-06 +[2026-04-25 19:54:56] Epoch 1 | Step 5620 | Loss: 1.1716 | LR: 5.00e-06 +[2026-04-25 19:54:59] Epoch 1 | Step 5630 | Loss: 1.1715 | LR: 5.00e-06 +[2026-04-25 19:55:02] Epoch 1 | Step 5640 | Loss: 1.1714 | LR: 5.00e-06 +[2026-04-25 19:55:04] Epoch 1 | Step 5650 | Loss: 1.1713 | LR: 5.00e-06 +[2026-04-25 19:55:07] Epoch 1 | Step 5660 | Loss: 1.1709 | LR: 5.00e-06 +[2026-04-25 19:55:09] Epoch 1 | Step 5670 | Loss: 1.1708 | LR: 5.00e-06 +[2026-04-25 19:55:12] Epoch 1 | Step 5680 | Loss: 1.1704 | LR: 5.00e-06 +[2026-04-25 19:55:14] Epoch 1 | Step 5690 | Loss: 1.1704 | LR: 5.00e-06 +[2026-04-25 19:55:16] Epoch 1 | Step 5700 | Loss: 1.1702 | LR: 5.00e-06 +[2026-04-25 19:55:19] Epoch 1 | Step 5710 | Loss: 1.1702 | LR: 5.00e-06 +[2026-04-25 19:55:22] Epoch 1 | Step 5720 | Loss: 1.1701 | LR: 5.00e-06 +[2026-04-25 19:55:24] Epoch 1 | Step 5730 | Loss: 1.1700 | LR: 5.00e-06 +[2026-04-25 19:55:27] Epoch 1 | Step 5740 | Loss: 1.1700 | LR: 5.00e-06 +[2026-04-25 19:55:30] Epoch 1 | Step 5750 | Loss: 1.1698 | LR: 5.00e-06 +[2026-04-25 19:55:33] Epoch 1 | Step 5760 | Loss: 1.1697 | LR: 5.00e-06 +[2026-04-25 19:55:35] Epoch 1 | Step 5770 | Loss: 1.1697 | LR: 5.00e-06 +[2026-04-25 19:55:37] Epoch 1 | Step 5780 | Loss: 1.1694 | LR: 5.00e-06 +[2026-04-25 19:55:40] Epoch 1 | Step 5790 | Loss: 1.1695 | LR: 5.00e-06 +[2026-04-25 19:55:42] Epoch 
1 | Step 5800 | Loss: 1.1697 | LR: 5.00e-06 +[2026-04-25 19:55:45] Epoch 1 | Step 5810 | Loss: 1.1696 | LR: 5.00e-06 +[2026-04-25 19:55:48] Epoch 1 | Step 5820 | Loss: 1.1693 | LR: 5.00e-06 +[2026-04-25 19:55:50] Epoch 1 | Step 5830 | Loss: 1.1690 | LR: 5.00e-06 +[2026-04-25 19:55:52] Epoch 1 | Step 5840 | Loss: 1.1691 | LR: 5.00e-06 +[2026-04-25 19:55:55] Epoch 1 | Step 5850 | Loss: 1.1690 | LR: 5.00e-06 +[2026-04-25 19:55:57] Epoch 1 | Step 5860 | Loss: 1.1688 | LR: 5.00e-06 +[2026-04-25 19:56:00] Epoch 1 | Step 5870 | Loss: 1.1688 | LR: 5.00e-06 +[2026-04-25 19:56:03] Epoch 1 | Step 5880 | Loss: 1.1688 | LR: 5.00e-06 +[2026-04-25 19:56:05] Epoch 1 | Step 5890 | Loss: 1.1687 | LR: 5.00e-06 +[2026-04-25 19:56:08] Epoch 1 | Step 5900 | Loss: 1.1685 | LR: 5.00e-06 +[2026-04-25 19:56:11] Epoch 1 | Step 5910 | Loss: 1.1684 | LR: 5.00e-06 +[2026-04-25 19:56:13] Epoch 1 | Step 5920 | Loss: 1.1681 | LR: 5.00e-06 +[2026-04-25 19:56:16] Epoch 1 | Step 5930 | Loss: 1.1681 | LR: 5.00e-06 +[2026-04-25 19:56:19] Epoch 1 | Step 5940 | Loss: 1.1679 | LR: 5.00e-06 +[2026-04-25 19:56:21] Epoch 1 | Step 5950 | Loss: 1.1680 | LR: 5.00e-06 +[2026-04-25 19:56:24] Epoch 1 | Step 5960 | Loss: 1.1679 | LR: 5.00e-06 +[2026-04-25 19:56:26] Epoch 1 | Step 5970 | Loss: 1.1679 | LR: 5.00e-06 +[2026-04-25 19:56:29] Epoch 1 | Step 5980 | Loss: 1.1677 | LR: 5.00e-06 +[2026-04-25 19:56:32] Epoch 1 | Step 5990 | Loss: 1.1678 | LR: 5.00e-06 +[2026-04-25 19:56:34] Epoch 1 | Step 6000 | Loss: 1.1676 | LR: 5.00e-06 +[2026-04-25 19:56:35] Validation | Batch 10/84 | Loss: 1.0538 +[2026-04-25 19:56:35] Validation | Batch 20/84 | Loss: 1.0550 +[2026-04-25 19:56:35] Validation | Batch 30/84 | Loss: 1.1355 +[2026-04-25 19:56:36] Validation | Batch 40/84 | Loss: 1.1377 +[2026-04-25 19:56:36] Validation | Batch 50/84 | Loss: 1.1304 +[2026-04-25 19:56:37] Validation | Batch 60/84 | Loss: 1.1020 +[2026-04-25 19:56:37] Validation | Batch 70/84 | Loss: 1.0857 +[2026-04-25 19:56:38] Validation | Batch 80/84 | Loss: 1.0935 +[2026-04-25 19:56:38] Validation | Batch 84/84 | Loss: 1.0843 +[2026-04-25 19:56:38] Validation | Loss: 1.0843 | PPL: 3.02 | Time: 3.76s +[2026-04-25 19:56:41] New best model saved! 
Val loss: 1.0843 +[2026-04-25 19:56:43] Epoch 1 | Step 6010 | Loss: 1.1676 | LR: 5.00e-06 +[2026-04-25 19:56:46] Epoch 1 | Step 6020 | Loss: 1.1674 | LR: 5.00e-06 +[2026-04-25 19:56:48] Epoch 1 | Step 6030 | Loss: 1.1675 | LR: 5.00e-06 +[2026-04-25 19:56:51] Epoch 1 | Step 6040 | Loss: 1.1674 | LR: 5.00e-06 +[2026-04-25 19:56:54] Epoch 1 | Step 6050 | Loss: 1.1674 | LR: 5.00e-06 +[2026-04-25 19:56:56] Epoch 1 | Step 6060 | Loss: 1.1673 | LR: 5.00e-06 +[2026-04-25 19:56:59] Epoch 1 | Step 6070 | Loss: 1.1670 | LR: 5.00e-06 +[2026-04-25 19:57:01] Epoch 1 | Step 6080 | Loss: 1.1670 | LR: 5.00e-06 +[2026-04-25 19:57:04] Epoch 1 | Step 6090 | Loss: 1.1671 | LR: 5.00e-06 +[2026-04-25 19:57:07] Epoch 1 | Step 6100 | Loss: 1.1672 | LR: 5.00e-06 +[2026-04-25 19:57:09] Epoch 1 | Step 6110 | Loss: 1.1671 | LR: 5.00e-06 +[2026-04-25 19:57:12] Epoch 1 | Step 6120 | Loss: 1.1669 | LR: 5.00e-06 +[2026-04-25 19:57:14] Epoch 1 | Step 6130 | Loss: 1.1667 | LR: 5.00e-06 +[2026-04-25 19:57:17] Epoch 1 | Step 6140 | Loss: 1.1662 | LR: 5.00e-06 +[2026-04-25 19:57:19] Epoch 1 | Step 6150 | Loss: 1.1660 | LR: 5.00e-06 +[2026-04-25 19:57:22] Epoch 1 | Step 6160 | Loss: 1.1659 | LR: 5.00e-06 +[2026-04-25 19:57:24] Epoch 1 | Step 6170 | Loss: 1.1660 | LR: 5.00e-06 +[2026-04-25 19:57:27] Epoch 1 | Step 6180 | Loss: 1.1657 | LR: 5.00e-06 +[2026-04-25 19:57:29] Epoch 1 | Step 6190 | Loss: 1.1655 | LR: 5.00e-06 +[2026-04-25 19:57:32] Epoch 1 | Step 6200 | Loss: 1.1653 | LR: 5.00e-06 +[2026-04-25 19:57:35] Epoch 1 | Step 6210 | Loss: 1.1653 | LR: 5.00e-06 +[2026-04-25 19:57:38] Epoch 1 | Step 6220 | Loss: 1.1653 | LR: 5.00e-06 +[2026-04-25 19:57:40] Epoch 1 | Step 6230 | Loss: 1.1650 | LR: 5.00e-06 +[2026-04-25 19:57:43] Epoch 1 | Step 6240 | Loss: 1.1650 | LR: 5.00e-06 +[2026-04-25 19:57:45] Epoch 1 | Step 6250 | Loss: 1.1646 | LR: 5.00e-06 +[2026-04-25 19:57:48] Epoch 1 | Step 6260 | Loss: 1.1646 | LR: 5.00e-06 +[2026-04-25 19:57:50] Epoch 1 | Step 6270 | Loss: 1.1645 | LR: 5.00e-06 +[2026-04-25 19:57:53] Epoch 1 | Step 6280 | Loss: 1.1641 | LR: 5.00e-06 +[2026-04-25 19:57:55] Epoch 1 | Step 6290 | Loss: 1.1640 | LR: 5.00e-06 +[2026-04-25 19:57:58] Epoch 1 | Step 6300 | Loss: 1.1639 | LR: 5.00e-06 +[2026-04-25 19:58:01] Epoch 1 | Step 6310 | Loss: 1.1639 | LR: 5.00e-06 +[2026-04-25 19:58:03] Epoch 1 | Step 6320 | Loss: 1.1638 | LR: 5.00e-06 +[2026-04-25 19:58:06] Epoch 1 | Step 6330 | Loss: 1.1640 | LR: 5.00e-06 +[2026-04-25 19:58:08] Epoch 1 | Step 6340 | Loss: 1.1640 | LR: 5.00e-06 +[2026-04-25 19:58:11] Epoch 1 | Step 6350 | Loss: 1.1639 | LR: 5.00e-06 +[2026-04-25 19:58:13] Epoch 1 | Step 6360 | Loss: 1.1640 | LR: 5.00e-06 +[2026-04-25 19:58:16] Epoch 1 | Step 6370 | Loss: 1.1639 | LR: 5.00e-06 +[2026-04-25 19:58:18] Epoch 1 | Step 6380 | Loss: 1.1638 | LR: 5.00e-06 +[2026-04-25 19:58:21] Epoch 1 | Step 6390 | Loss: 1.1635 | LR: 5.00e-06 +[2026-04-25 19:58:23] Epoch 1 | Step 6400 | Loss: 1.1633 | LR: 5.00e-06 +[2026-04-25 19:58:25] Epoch 1 | Step 6410 | Loss: 1.1632 | LR: 5.00e-06 +[2026-04-25 19:58:28] Epoch 1 | Step 6420 | Loss: 1.1630 | LR: 5.00e-06 +[2026-04-25 19:58:30] Epoch 1 | Step 6430 | Loss: 1.1629 | LR: 5.00e-06 +[2026-04-25 19:58:33] Epoch 1 | Step 6440 | Loss: 1.1628 | LR: 5.00e-06 +[2026-04-25 19:58:35] Epoch 1 | Step 6450 | Loss: 1.1627 | LR: 5.00e-06 +[2026-04-25 19:58:38] Epoch 1 | Step 6460 | Loss: 1.1623 | LR: 5.00e-06 +[2026-04-25 19:58:40] Epoch 1 | Step 6470 | Loss: 1.1622 | LR: 5.00e-06 +[2026-04-25 19:58:43] Epoch 1 | Step 6480 | Loss: 1.1622 | LR: 5.00e-06 +[2026-04-25 19:58:45] Epoch 1 | 
Step 6490 | Loss: 1.1623 | LR: 5.00e-06 +[2026-04-25 19:58:48] Epoch 1 | Step 6500 | Loss: 1.1620 | LR: 5.00e-06 +[2026-04-25 19:58:50] Epoch 1 | Step 6510 | Loss: 1.1618 | LR: 5.00e-06 +[2026-04-25 19:58:53] Epoch 1 | Step 6520 | Loss: 1.1615 | LR: 5.00e-06 +[2026-04-25 19:58:55] Epoch 1 | Step 6530 | Loss: 1.1612 | LR: 5.00e-06 +[2026-04-25 19:58:58] Epoch 1 | Step 6540 | Loss: 1.1610 | LR: 5.00e-06 +[2026-04-25 19:59:00] Epoch 1 | Step 6550 | Loss: 1.1608 | LR: 5.00e-06 +[2026-04-25 19:59:03] Epoch 1 | Step 6560 | Loss: 1.1607 | LR: 5.00e-06 +[2026-04-25 19:59:05] Epoch 1 | Step 6570 | Loss: 1.1606 | LR: 5.00e-06 +[2026-04-25 19:59:08] Epoch 1 | Step 6580 | Loss: 1.1606 | LR: 5.00e-06 +[2026-04-25 19:59:10] Epoch 1 | Step 6590 | Loss: 1.1603 | LR: 5.00e-06 +[2026-04-25 19:59:13] Epoch 1 | Step 6600 | Loss: 1.1602 | LR: 5.00e-06 +[2026-04-25 19:59:16] Epoch 1 | Step 6610 | Loss: 1.1601 | LR: 5.00e-06 +[2026-04-25 19:59:18] Epoch 1 | Step 6620 | Loss: 1.1600 | LR: 5.00e-06 +[2026-04-25 19:59:21] Epoch 1 | Step 6630 | Loss: 1.1598 | LR: 5.00e-06 +[2026-04-25 19:59:23] Epoch 1 | Step 6640 | Loss: 1.1598 | LR: 5.00e-06 +[2026-04-25 19:59:25] Epoch 1 | Step 6650 | Loss: 1.1598 | LR: 5.00e-06 +[2026-04-25 19:59:28] Epoch 1 | Step 6660 | Loss: 1.1595 | LR: 5.00e-06 +[2026-04-25 19:59:31] Epoch 1 | Step 6670 | Loss: 1.1594 | LR: 5.00e-06 +[2026-04-25 19:59:33] Epoch 1 | Step 6680 | Loss: 1.1593 | LR: 5.00e-06 +[2026-04-25 19:59:36] Epoch 1 | Step 6690 | Loss: 1.1593 | LR: 5.00e-06 +[2026-04-25 19:59:38] Epoch 1 | Step 6700 | Loss: 1.1592 | LR: 5.00e-06 +[2026-04-25 19:59:41] Epoch 1 | Step 6710 | Loss: 1.1591 | LR: 5.00e-06 +[2026-04-25 19:59:43] Epoch 1 | Step 6720 | Loss: 1.1590 | LR: 5.00e-06 +[2026-04-25 19:59:46] Epoch 1 | Step 6730 | Loss: 1.1591 | LR: 5.00e-06 +[2026-04-25 19:59:48] Epoch 1 | Step 6740 | Loss: 1.1589 | LR: 5.00e-06 +[2026-04-25 19:59:51] Epoch 1 | Step 6750 | Loss: 1.1587 | LR: 5.00e-06 +[2026-04-25 19:59:53] Epoch 1 | Step 6760 | Loss: 1.1587 | LR: 5.00e-06 +[2026-04-25 19:59:56] Epoch 1 | Step 6770 | Loss: 1.1586 | LR: 5.00e-06 +[2026-04-25 19:59:58] Epoch 1 | Step 6780 | Loss: 1.1585 | LR: 5.00e-06 +[2026-04-25 20:00:01] Epoch 1 | Step 6790 | Loss: 1.1585 | LR: 5.00e-06 +[2026-04-25 20:00:03] Epoch 1 | Step 6800 | Loss: 1.1586 | LR: 5.00e-06 +[2026-04-25 20:00:06] Epoch 1 | Step 6810 | Loss: 1.1585 | LR: 5.00e-06 +[2026-04-25 20:00:08] Epoch 1 | Step 6820 | Loss: 1.1586 | LR: 5.00e-06 +[2026-04-25 20:00:11] Epoch 1 | Step 6830 | Loss: 1.1586 | LR: 5.00e-06 +[2026-04-25 20:00:14] Epoch 1 | Step 6840 | Loss: 1.1586 | LR: 5.00e-06 +[2026-04-25 20:00:16] Epoch 1 | Step 6850 | Loss: 1.1585 | LR: 5.00e-06 +[2026-04-25 20:00:19] Epoch 1 | Step 6860 | Loss: 1.1584 | LR: 5.00e-06 +[2026-04-25 20:00:21] Epoch 1 | Step 6870 | Loss: 1.1583 | LR: 5.00e-06 +[2026-04-25 20:00:24] Epoch 1 | Step 6880 | Loss: 1.1581 | LR: 5.00e-06 +[2026-04-25 20:00:26] Epoch 1 | Step 6890 | Loss: 1.1582 | LR: 5.00e-06 +[2026-04-25 20:00:29] Epoch 1 | Step 6900 | Loss: 1.1582 | LR: 5.00e-06 +[2026-04-25 20:00:31] Epoch 1 | Step 6910 | Loss: 1.1578 | LR: 5.00e-06 +[2026-04-25 20:00:34] Epoch 1 | Step 6920 | Loss: 1.1577 | LR: 5.00e-06 +[2026-04-25 20:00:36] Epoch 1 | Step 6930 | Loss: 1.1577 | LR: 5.00e-06 +[2026-04-25 20:00:39] Epoch 1 | Step 6940 | Loss: 1.1575 | LR: 5.00e-06 +[2026-04-25 20:00:41] Epoch 1 | Step 6950 | Loss: 1.1574 | LR: 5.00e-06 +[2026-04-25 20:00:44] Epoch 1 | Step 6960 | Loss: 1.1574 | LR: 5.00e-06 +[2026-04-25 20:00:47] Epoch 1 | Step 6970 | Loss: 1.1573 | LR: 5.00e-06 
+[2026-04-25 20:00:49] Epoch 1 | Step 6980 | Loss: 1.1572 | LR: 5.00e-06 +[2026-04-25 20:00:51] Epoch 1 | Step 6990 | Loss: 1.1570 | LR: 5.00e-06 +[2026-04-25 20:00:54] Epoch 1 | Step 7000 | Loss: 1.1568 | LR: 5.00e-06 +[2026-04-25 20:00:56] Epoch 1 | Step 7010 | Loss: 1.1567 | LR: 5.00e-06 +[2026-04-25 20:00:59] Epoch 1 | Step 7020 | Loss: 1.1567 | LR: 5.00e-06 +[2026-04-25 20:01:01] Epoch 1 | Step 7030 | Loss: 1.1566 | LR: 5.00e-06 +[2026-04-25 20:01:04] Epoch 1 | Step 7040 | Loss: 1.1566 | LR: 5.00e-06 +[2026-04-25 20:01:06] Epoch 1 | Step 7050 | Loss: 1.1564 | LR: 5.00e-06 +[2026-04-25 20:01:09] Epoch 1 | Step 7060 | Loss: 1.1563 | LR: 5.00e-06 +[2026-04-25 20:01:11] Epoch 1 | Step 7070 | Loss: 1.1564 | LR: 5.00e-06 +[2026-04-25 20:01:14] Epoch 1 | Step 7080 | Loss: 1.1561 | LR: 5.00e-06 +[2026-04-25 20:01:16] Epoch 1 | Step 7090 | Loss: 1.1561 | LR: 5.00e-06 +[2026-04-25 20:01:19] Epoch 1 | Step 7100 | Loss: 1.1558 | LR: 5.00e-06 +[2026-04-25 20:01:22] Epoch 1 | Step 7110 | Loss: 1.1557 | LR: 5.00e-06 +[2026-04-25 20:01:24] Epoch 1 | Step 7120 | Loss: 1.1558 | LR: 5.00e-06 +[2026-04-25 20:01:27] Epoch 1 | Step 7130 | Loss: 1.1555 | LR: 5.00e-06 +[2026-04-25 20:01:29] Epoch 1 | Step 7140 | Loss: 1.1553 | LR: 5.00e-06 +[2026-04-25 20:01:31] Epoch 1 | Step 7150 | Loss: 1.1555 | LR: 5.00e-06 +[2026-04-25 20:01:34] Epoch 1 | Step 7160 | Loss: 1.1552 | LR: 5.00e-06 +[2026-04-25 20:01:37] Epoch 1 | Step 7170 | Loss: 1.1552 | LR: 5.00e-06 +[2026-04-25 20:01:39] Epoch 1 | Step 7180 | Loss: 1.1551 | LR: 5.00e-06 +[2026-04-25 20:01:42] Epoch 1 | Step 7190 | Loss: 1.1552 | LR: 5.00e-06 +[2026-04-25 20:01:44] Epoch 1 | Step 7200 | Loss: 1.1550 | LR: 5.00e-06 +[2026-04-25 20:01:47] Epoch 1 | Step 7210 | Loss: 1.1548 | LR: 5.00e-06 +[2026-04-25 20:01:50] Epoch 1 | Step 7220 | Loss: 1.1548 | LR: 5.00e-06 +[2026-04-25 20:01:52] Epoch 1 | Step 7230 | Loss: 1.1548 | LR: 5.00e-06 +[2026-04-25 20:01:55] Epoch 1 | Step 7240 | Loss: 1.1547 | LR: 5.00e-06 +[2026-04-25 20:01:58] Epoch 1 | Step 7250 | Loss: 1.1546 | LR: 5.00e-06 +[2026-04-25 20:02:00] Epoch 1 | Step 7260 | Loss: 1.1545 | LR: 5.00e-06 +[2026-04-25 20:02:03] Epoch 1 | Step 7270 | Loss: 1.1545 | LR: 5.00e-06 +[2026-04-25 20:02:05] Epoch 1 | Step 7280 | Loss: 1.1545 | LR: 5.00e-06 +[2026-04-25 20:02:08] Epoch 1 | Step 7290 | Loss: 1.1542 | LR: 5.00e-06 +[2026-04-25 20:02:10] Epoch 1 | Step 7300 | Loss: 1.1541 | LR: 5.00e-06 +[2026-04-25 20:02:13] Epoch 1 | Step 7310 | Loss: 1.1539 | LR: 5.00e-06 +[2026-04-25 20:02:16] Epoch 1 | Step 7320 | Loss: 1.1536 | LR: 5.00e-06 +[2026-04-25 20:02:18] Epoch 1 | Step 7330 | Loss: 1.1536 | LR: 5.00e-06 +[2026-04-25 20:02:21] Epoch 1 | Step 7340 | Loss: 1.1537 | LR: 5.00e-06 +[2026-04-25 20:02:23] Epoch 1 | Step 7350 | Loss: 1.1537 | LR: 5.00e-06 +[2026-04-25 20:02:26] Epoch 1 | Step 7360 | Loss: 1.1535 | LR: 5.00e-06 +[2026-04-25 20:02:29] Epoch 1 | Step 7370 | Loss: 1.1532 | LR: 5.00e-06 +[2026-04-25 20:02:31] Epoch 1 | Step 7380 | Loss: 1.1530 | LR: 5.00e-06 +[2026-04-25 20:02:34] Epoch 1 | Step 7390 | Loss: 1.1528 | LR: 5.00e-06 +[2026-04-25 20:02:36] Epoch 1 | Step 7400 | Loss: 1.1527 | LR: 5.00e-06 +[2026-04-25 20:02:39] Epoch 1 | Step 7410 | Loss: 1.1528 | LR: 5.00e-06 +[2026-04-25 20:02:41] Epoch 1 | Step 7420 | Loss: 1.1527 | LR: 5.00e-06 +[2026-04-25 20:02:44] Epoch 1 | Step 7430 | Loss: 1.1525 | LR: 5.00e-06 +[2026-04-25 20:02:46] Epoch 1 | Step 7440 | Loss: 1.1525 | LR: 5.00e-06 +[2026-04-25 20:02:49] Epoch 1 | Step 7450 | Loss: 1.1523 | LR: 5.00e-06 +[2026-04-25 20:02:51] Epoch 1 | Step 7460 | Loss: 
1.1522 | LR: 5.00e-06 +[2026-04-25 20:02:54] Epoch 1 | Step 7470 | Loss: 1.1521 | LR: 5.00e-06 +[2026-04-25 20:02:56] Epoch 1 | Step 7480 | Loss: 1.1521 | LR: 5.00e-06 +[2026-04-25 20:02:59] Epoch 1 | Step 7490 | Loss: 1.1521 | LR: 5.00e-06 +[2026-04-25 20:03:01] Epoch 1 | Step 7500 | Loss: 1.1521 | LR: 5.00e-06 +[2026-04-25 20:03:04] Epoch 1 | Step 7510 | Loss: 1.1521 | LR: 5.00e-06 +[2026-04-25 20:03:06] Epoch 1 | Step 7520 | Loss: 1.1520 | LR: 5.00e-06 +[2026-04-25 20:03:09] Epoch 1 | Step 7530 | Loss: 1.1518 | LR: 5.00e-06 +[2026-04-25 20:03:11] Epoch 1 | Step 7540 | Loss: 1.1517 | LR: 5.00e-06 +[2026-04-25 20:03:14] Epoch 1 | Step 7550 | Loss: 1.1517 | LR: 5.00e-06 +[2026-04-25 20:03:16] Epoch 1 | Step 7560 | Loss: 1.1516 | LR: 5.00e-06 +[2026-04-25 20:03:19] Epoch 1 | Step 7570 | Loss: 1.1515 | LR: 5.00e-06 +[2026-04-25 20:03:21] Epoch 1 | Step 7580 | Loss: 1.1514 | LR: 5.00e-06 +[2026-04-25 20:03:24] Epoch 1 | Step 7590 | Loss: 1.1512 | LR: 5.00e-06 +[2026-04-25 20:03:26] Epoch 1 | Step 7600 | Loss: 1.1511 | LR: 5.00e-06 +[2026-04-25 20:03:28] Epoch 1 | Step 7610 | Loss: 1.1510 | LR: 5.00e-06 +[2026-04-25 20:03:31] Epoch 1 | Step 7620 | Loss: 1.1508 | LR: 5.00e-06 +[2026-04-25 20:03:33] Epoch 1 | Step 7630 | Loss: 1.1507 | LR: 5.00e-06 +[2026-04-25 20:03:36] Epoch 1 | Step 7640 | Loss: 1.1506 | LR: 5.00e-06 +[2026-04-25 20:03:39] Epoch 1 | Step 7650 | Loss: 1.1504 | LR: 5.00e-06 +[2026-04-25 20:03:41] Epoch 1 | Step 7660 | Loss: 1.1502 | LR: 5.00e-06 +[2026-04-25 20:03:44] Epoch 1 | Step 7670 | Loss: 1.1500 | LR: 5.00e-06 +[2026-04-25 20:03:46] Epoch 1 | Step 7680 | Loss: 1.1499 | LR: 5.00e-06 +[2026-04-25 20:03:49] Epoch 1 | Step 7690 | Loss: 1.1500 | LR: 5.00e-06 +[2026-04-25 20:03:52] Epoch 1 | Step 7700 | Loss: 1.1498 | LR: 5.00e-06 +[2026-04-25 20:03:55] Epoch 1 | Step 7710 | Loss: 1.1495 | LR: 5.00e-06 +[2026-04-25 20:03:57] Epoch 1 | Step 7720 | Loss: 1.1496 | LR: 5.00e-06 +[2026-04-25 20:04:00] Epoch 1 | Step 7730 | Loss: 1.1497 | LR: 5.00e-06 +[2026-04-25 20:04:02] Epoch 1 | Step 7740 | Loss: 1.1498 | LR: 5.00e-06 +[2026-04-25 20:04:05] Epoch 1 | Step 7750 | Loss: 1.1498 | LR: 5.00e-06 +[2026-04-25 20:04:08] Epoch 1 | Step 7760 | Loss: 1.1496 | LR: 5.00e-06 +[2026-04-25 20:04:10] Epoch 1 | Step 7770 | Loss: 1.1494 | LR: 5.00e-06 +[2026-04-25 20:04:13] Epoch 1 | Step 7780 | Loss: 1.1493 | LR: 5.00e-06 +[2026-04-25 20:04:15] Epoch 1 | Step 7790 | Loss: 1.1492 | LR: 5.00e-06 +[2026-04-25 20:04:18] Epoch 1 | Step 7800 | Loss: 1.1490 | LR: 5.00e-06 +[2026-04-25 20:04:20] Epoch 1 | Step 7810 | Loss: 1.1491 | LR: 5.00e-06 +[2026-04-25 20:04:23] Epoch 1 | Step 7820 | Loss: 1.1491 | LR: 5.00e-06 +[2026-04-25 20:04:25] Epoch 1 | Step 7830 | Loss: 1.1490 | LR: 5.00e-06 +[2026-04-25 20:04:28] Epoch 1 | Step 7840 | Loss: 1.1488 | LR: 5.00e-06 +[2026-04-25 20:04:31] Epoch 1 | Step 7850 | Loss: 1.1485 | LR: 5.00e-06 +[2026-04-25 20:04:33] Epoch 1 | Step 7860 | Loss: 1.1485 | LR: 5.00e-06 +[2026-04-25 20:04:36] Epoch 1 | Step 7870 | Loss: 1.1484 | LR: 5.00e-06 +[2026-04-25 20:04:38] Epoch 1 | Step 7880 | Loss: 1.1484 | LR: 5.00e-06 +[2026-04-25 20:04:41] Epoch 1 | Step 7890 | Loss: 1.1483 | LR: 5.00e-06 +[2026-04-25 20:04:44] Epoch 1 | Step 7900 | Loss: 1.1483 | LR: 5.00e-06 +[2026-04-25 20:04:47] Epoch 1 | Step 7910 | Loss: 1.1483 | LR: 5.00e-06 +[2026-04-25 20:04:50] Epoch 1 | Step 7920 | Loss: 1.1482 | LR: 5.00e-06 +[2026-04-25 20:04:52] Epoch 1 | Step 7930 | Loss: 1.1483 | LR: 5.00e-06 +[2026-04-25 20:04:55] Epoch 1 | Step 7940 | Loss: 1.1482 | LR: 5.00e-06 +[2026-04-25 20:04:57] Epoch 
1 | Step 7950 | Loss: 1.1484 | LR: 5.00e-06 +[2026-04-25 20:05:00] Epoch 1 | Step 7960 | Loss: 1.1484 | LR: 5.00e-06 +[2026-04-25 20:05:02] Epoch 1 | Step 7970 | Loss: 1.1484 | LR: 5.00e-06 +[2026-04-25 20:05:05] Epoch 1 | Step 7980 | Loss: 1.1481 | LR: 5.00e-06 +[2026-04-25 20:05:07] Epoch 1 | Step 7990 | Loss: 1.1481 | LR: 5.00e-06 +[2026-04-25 20:05:10] Epoch 1 | Step 8000 | Loss: 1.1480 | LR: 5.00e-06 +[2026-04-25 20:05:10] Validation | Batch 10/84 | Loss: 1.0434 +[2026-04-25 20:05:11] Validation | Batch 20/84 | Loss: 1.0455 +[2026-04-25 20:05:11] Validation | Batch 30/84 | Loss: 1.1267 +[2026-04-25 20:05:12] Validation | Batch 40/84 | Loss: 1.1299 +[2026-04-25 20:05:12] Validation | Batch 50/84 | Loss: 1.1227 +[2026-04-25 20:05:12] Validation | Batch 60/84 | Loss: 1.0947 +[2026-04-25 20:05:13] Validation | Batch 70/84 | Loss: 1.0789 +[2026-04-25 20:05:13] Validation | Batch 80/84 | Loss: 1.0866 +[2026-04-25 20:05:13] Validation | Batch 84/84 | Loss: 1.0775 +[2026-04-25 20:05:14] Validation | Loss: 1.0775 | PPL: 3.00 | Time: 3.76s +[2026-04-25 20:05:16] New best model saved! Val loss: 1.0775 +[2026-04-25 20:05:19] Epoch 1 | Step 8010 | Loss: 1.1479 | LR: 5.00e-06 +[2026-04-25 20:05:22] Epoch 1 | Step 8020 | Loss: 1.1477 | LR: 5.00e-06 +[2026-04-25 20:05:25] Epoch 1 | Step 8030 | Loss: 1.1475 | LR: 5.00e-06 +[2026-04-25 20:05:28] Epoch 1 | Step 8040 | Loss: 1.1476 | LR: 5.00e-06 +[2026-04-25 20:05:30] Epoch 1 | Step 8050 | Loss: 1.1474 | LR: 5.00e-06 +[2026-04-25 20:05:33] Epoch 1 | Step 8060 | Loss: 1.1473 | LR: 5.00e-06 +[2026-04-25 20:05:35] Epoch 1 | Step 8070 | Loss: 1.1472 | LR: 5.00e-06 +[2026-04-25 20:05:38] Epoch 1 | Step 8080 | Loss: 1.1471 | LR: 5.00e-06 +[2026-04-25 20:05:40] Epoch 1 | Step 8090 | Loss: 1.1469 | LR: 5.00e-06 +[2026-04-25 20:05:43] Epoch 1 | Step 8100 | Loss: 1.1468 | LR: 5.00e-06 +[2026-04-25 20:05:46] Epoch 1 | Step 8110 | Loss: 1.1469 | LR: 5.00e-06 +[2026-04-25 20:05:48] Epoch 1 | Step 8120 | Loss: 1.1468 | LR: 5.00e-06 +[2026-04-25 20:05:51] Epoch 1 | Step 8130 | Loss: 1.1467 | LR: 5.00e-06 +[2026-04-25 20:05:53] Epoch 1 | Step 8140 | Loss: 1.1467 | LR: 5.00e-06 +[2026-04-25 20:05:56] Epoch 1 | Step 8150 | Loss: 1.1467 | LR: 5.00e-06 +[2026-04-25 20:05:58] Epoch 1 | Step 8160 | Loss: 1.1465 | LR: 5.00e-06 +[2026-04-25 20:06:01] Epoch 1 | Step 8170 | Loss: 1.1464 | LR: 5.00e-06 +[2026-04-25 20:06:03] Epoch 1 | Step 8180 | Loss: 1.1463 | LR: 5.00e-06 +[2026-04-25 20:06:06] Epoch 1 | Step 8190 | Loss: 1.1461 | LR: 5.00e-06 +[2026-04-25 20:06:08] Epoch 1 | Step 8200 | Loss: 1.1461 | LR: 5.00e-06 +[2026-04-25 20:06:11] Epoch 1 | Step 8210 | Loss: 1.1460 | LR: 5.00e-06 +[2026-04-25 20:06:13] Epoch 1 | Step 8220 | Loss: 1.1460 | LR: 5.00e-06 +[2026-04-25 20:06:16] Epoch 1 | Step 8230 | Loss: 1.1460 | LR: 5.00e-06 +[2026-04-25 20:06:18] Epoch 1 | Step 8240 | Loss: 1.1460 | LR: 5.00e-06 +[2026-04-25 20:06:21] Epoch 1 | Step 8250 | Loss: 1.1459 | LR: 5.00e-06 +[2026-04-25 20:06:23] Epoch 1 | Step 8260 | Loss: 1.1460 | LR: 5.00e-06 +[2026-04-25 20:06:26] Epoch 1 | Step 8270 | Loss: 1.1460 | LR: 5.00e-06 +[2026-04-25 20:06:29] Epoch 1 | Step 8280 | Loss: 1.1460 | LR: 5.00e-06 +[2026-04-25 20:06:31] Epoch 1 | Step 8290 | Loss: 1.1461 | LR: 5.00e-06 +[2026-04-25 20:06:34] Epoch 1 | Step 8300 | Loss: 1.1459 | LR: 5.00e-06 +[2026-04-25 20:06:36] Epoch 1 | Step 8310 | Loss: 1.1460 | LR: 5.00e-06 +[2026-04-25 20:06:38] Epoch 1 | Step 8320 | Loss: 1.1459 | LR: 5.00e-06 +[2026-04-25 20:06:41] Epoch 1 | Step 8330 | Loss: 1.1459 | LR: 5.00e-06 +[2026-04-25 20:06:43] Epoch 1 | 
Step 8340 | Loss: 1.1458 | LR: 5.00e-06 +[2026-04-25 20:06:46] Epoch 1 | Step 8350 | Loss: 1.1457 | LR: 5.00e-06 +[2026-04-25 20:06:48] Epoch 1 | Step 8360 | Loss: 1.1456 | LR: 5.00e-06 +[2026-04-25 20:06:51] Epoch 1 | Step 8370 | Loss: 1.1455 | LR: 5.00e-06 +[2026-04-25 20:06:53] Epoch 1 | Step 8380 | Loss: 1.1454 | LR: 5.00e-06 +[2026-04-25 20:06:55] Epoch 1 | Step 8390 | Loss: 1.1454 | LR: 5.00e-06 +[2026-04-25 20:06:58] Epoch 1 | Step 8400 | Loss: 1.1453 | LR: 5.00e-06 +[2026-04-25 20:07:01] Epoch 1 | Step 8410 | Loss: 1.1453 | LR: 5.00e-06 +[2026-04-25 20:07:03] Epoch 1 | Step 8420 | Loss: 1.1454 | LR: 5.00e-06 +[2026-04-25 20:07:06] Epoch 1 | Step 8430 | Loss: 1.1451 | LR: 5.00e-06 +[2026-04-25 20:07:08] Epoch 1 | Step 8440 | Loss: 1.1451 | LR: 5.00e-06 +[2026-04-25 20:07:11] Epoch 1 | Step 8450 | Loss: 1.1450 | LR: 5.00e-06 +[2026-04-25 20:07:13] Epoch 1 | Step 8460 | Loss: 1.1450 | LR: 5.00e-06 +[2026-04-25 20:07:16] Epoch 1 | Step 8470 | Loss: 1.1449 | LR: 5.00e-06 +[2026-04-25 20:07:18] Epoch 1 | Step 8480 | Loss: 1.1448 | LR: 5.00e-06 +[2026-04-25 20:07:21] Epoch 1 | Step 8490 | Loss: 1.1446 | LR: 5.00e-06 +[2026-04-25 20:07:23] Epoch 1 | Step 8500 | Loss: 1.1446 | LR: 5.00e-06 +[2026-04-25 20:07:26] Epoch 1 | Step 8510 | Loss: 1.1444 | LR: 5.00e-06 +[2026-04-25 20:07:29] Epoch 1 | Step 8520 | Loss: 1.1444 | LR: 5.00e-06 +[2026-04-25 20:07:31] Epoch 1 | Step 8530 | Loss: 1.1443 | LR: 5.00e-06 +[2026-04-25 20:07:34] Epoch 1 | Step 8540 | Loss: 1.1445 | LR: 5.00e-06 +[2026-04-25 20:07:36] Epoch 1 | Step 8550 | Loss: 1.1445 | LR: 5.00e-06 +[2026-04-25 20:07:39] Epoch 1 | Step 8560 | Loss: 1.1444 | LR: 5.00e-06 +[2026-04-25 20:07:41] Epoch 1 | Step 8570 | Loss: 1.1443 | LR: 5.00e-06 +[2026-04-25 20:07:44] Epoch 1 | Step 8580 | Loss: 1.1441 | LR: 5.00e-06 +[2026-04-25 20:07:46] Epoch 1 | Step 8590 | Loss: 1.1439 | LR: 5.00e-06 +[2026-04-25 20:07:49] Epoch 1 | Step 8600 | Loss: 1.1438 | LR: 5.00e-06 +[2026-04-25 20:07:52] Epoch 1 | Step 8610 | Loss: 1.1439 | LR: 5.00e-06 +[2026-04-25 20:07:54] Epoch 1 | Step 8620 | Loss: 1.1437 | LR: 5.00e-06 +[2026-04-25 20:07:57] Epoch 1 | Step 8630 | Loss: 1.1435 | LR: 5.00e-06 +[2026-04-25 20:07:59] Epoch 1 | Step 8640 | Loss: 1.1436 | LR: 5.00e-06 +[2026-04-25 20:08:02] Epoch 1 | Step 8650 | Loss: 1.1436 | LR: 5.00e-06 +[2026-04-25 20:08:04] Epoch 1 | Step 8660 | Loss: 1.1434 | LR: 5.00e-06 +[2026-04-25 20:08:07] Epoch 1 | Step 8670 | Loss: 1.1435 | LR: 5.00e-06 +[2026-04-25 20:08:09] Epoch 1 | Step 8680 | Loss: 1.1435 | LR: 5.00e-06 +[2026-04-25 20:08:12] Epoch 1 | Step 8690 | Loss: 1.1433 | LR: 5.00e-06 +[2026-04-25 20:08:14] Epoch 1 | Step 8700 | Loss: 1.1433 | LR: 5.00e-06 +[2026-04-25 20:08:17] Epoch 1 | Step 8710 | Loss: 1.1430 | LR: 5.00e-06 +[2026-04-25 20:08:19] Epoch 1 | Step 8720 | Loss: 1.1429 | LR: 5.00e-06 +[2026-04-25 20:08:22] Epoch 1 | Step 8730 | Loss: 1.1429 | LR: 5.00e-06 +[2026-04-25 20:08:24] Epoch 1 | Step 8740 | Loss: 1.1429 | LR: 5.00e-06 +[2026-04-25 20:08:27] Epoch 1 | Step 8750 | Loss: 1.1429 | LR: 5.00e-06 +[2026-04-25 20:08:29] Epoch 1 | Step 8760 | Loss: 1.1428 | LR: 5.00e-06 +[2026-04-25 20:08:32] Epoch 1 | Step 8770 | Loss: 1.1426 | LR: 5.00e-06 +[2026-04-25 20:08:35] Epoch 1 | Step 8780 | Loss: 1.1425 | LR: 5.00e-06 +[2026-04-25 20:08:37] Epoch 1 | Step 8790 | Loss: 1.1425 | LR: 5.00e-06 +[2026-04-25 20:08:40] Epoch 1 | Step 8800 | Loss: 1.1422 | LR: 5.00e-06 +[2026-04-25 20:08:43] Epoch 1 | Step 8810 | Loss: 1.1422 | LR: 5.00e-06 +[2026-04-25 20:08:45] Epoch 1 | Step 8820 | Loss: 1.1421 | LR: 5.00e-06 
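Each evaluation block in this log follows the same pattern: a running mean loss printed every ten batches out of 84 (the Batch 84/84 value matches the final mean), a summary line with the mean loss, a perplexity and the elapsed time, and a "New best model saved!" line whenever the validation loss improves on the previous best. A rough sketch of an evaluation loop that would emit lines of this shape follows; the function and path names are illustrative, the model is assumed to return an object with a .loss attribute, and the exact aggregation is a guess (the logged PPL values sit slightly above exp(mean loss), so the real code likely weights by tokens rather than batches).

import math
import time
import torch

@torch.no_grad()
def evaluate(model, val_loader, device="cuda"):
    model.eval()
    start, total, n = time.time(), 0.0, 0
    for i, batch in enumerate(val_loader, start=1):
        out = model(**{k: v.to(device) for k, v in batch.items()})
        total += out.loss.item()
        n += 1
        if i % 10 == 0 or i == len(val_loader):
            # running mean over the batches seen so far
            print(f"Validation | Batch {i}/{len(val_loader)} | Loss: {total / n:.4f}")
    mean_loss = total / n
    print(f"Validation | Loss: {mean_loss:.4f} | PPL: {math.exp(mean_loss):.2f} "
          f"| Time: {time.time() - start:.2f}s")
    return mean_loss

def maybe_save_best(model, val_loss, best_val_loss, path="best_model.pt"):
    # mirrors the "New best model saved! Val loss: ..." lines in the log
    if val_loss < best_val_loss:
        torch.save(model.state_dict(), path)
        print(f"New best model saved! Val loss: {val_loss:.4f}")
        return val_loss
    return best_val_loss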
+[2026-04-25 20:08:48] Epoch 1 | Step 8830 | Loss: 1.1420 | LR: 5.00e-06 +[2026-04-25 20:08:51] Epoch 1 | Step 8840 | Loss: 1.1419 | LR: 5.00e-06 +[2026-04-25 20:08:53] Epoch 1 | Step 8850 | Loss: 1.1419 | LR: 5.00e-06 +[2026-04-25 20:08:56] Epoch 1 | Step 8860 | Loss: 1.1419 | LR: 5.00e-06 +[2026-04-25 20:08:58] Epoch 1 | Step 8870 | Loss: 1.1419 | LR: 5.00e-06 +[2026-04-25 20:09:01] Epoch 1 | Step 8880 | Loss: 1.1418 | LR: 5.00e-06 +[2026-04-25 20:09:03] Epoch 1 | Step 8890 | Loss: 1.1416 | LR: 5.00e-06 +[2026-04-25 20:09:05] Epoch 1 | Step 8900 | Loss: 1.1413 | LR: 5.00e-06 +[2026-04-25 20:09:08] Epoch 1 | Step 8910 | Loss: 1.1414 | LR: 5.00e-06 +[2026-04-25 20:09:11] Epoch 1 | Step 8920 | Loss: 1.1411 | LR: 5.00e-06 +[2026-04-25 20:09:13] Epoch 1 | Step 8930 | Loss: 1.1410 | LR: 5.00e-06 +[2026-04-25 20:09:16] Epoch 1 | Step 8940 | Loss: 1.1410 | LR: 5.00e-06 +[2026-04-25 20:09:18] Epoch 1 | Step 8950 | Loss: 1.1410 | LR: 5.00e-06 +[2026-04-25 20:09:21] Epoch 1 | Step 8960 | Loss: 1.1410 | LR: 5.00e-06 +[2026-04-25 20:09:23] Epoch 1 | Step 8970 | Loss: 1.1409 | LR: 5.00e-06 +[2026-04-25 20:09:26] Epoch 1 | Step 8980 | Loss: 1.1408 | LR: 5.00e-06 +[2026-04-25 20:09:28] Epoch 1 | Step 8990 | Loss: 1.1406 | LR: 5.00e-06 +[2026-04-25 20:09:31] Epoch 1 | Step 9000 | Loss: 1.1406 | LR: 5.00e-06 +[2026-04-25 20:09:34] Epoch 1 | Step 9010 | Loss: 1.1406 | LR: 5.00e-06 +[2026-04-25 20:09:36] Epoch 1 | Step 9020 | Loss: 1.1407 | LR: 5.00e-06 +[2026-04-25 20:09:39] Epoch 1 | Step 9030 | Loss: 1.1406 | LR: 5.00e-06 +[2026-04-25 20:09:41] Epoch 1 | Step 9040 | Loss: 1.1406 | LR: 5.00e-06 +[2026-04-25 20:09:44] Epoch 1 | Step 9050 | Loss: 1.1404 | LR: 5.00e-06 +[2026-04-25 20:09:46] Epoch 1 | Step 9060 | Loss: 1.1405 | LR: 5.00e-06 +[2026-04-25 20:09:49] Epoch 1 | Step 9070 | Loss: 1.1403 | LR: 5.00e-06 +[2026-04-25 20:09:51] Epoch 1 | Step 9080 | Loss: 1.1403 | LR: 5.00e-06 +[2026-04-25 20:09:53] Epoch 1 | Step 9090 | Loss: 1.1402 | LR: 5.00e-06 +[2026-04-25 20:09:56] Epoch 1 | Step 9100 | Loss: 1.1402 | LR: 5.00e-06 +[2026-04-25 20:09:59] Epoch 1 | Step 9110 | Loss: 1.1402 | LR: 5.00e-06 +[2026-04-25 20:10:01] Epoch 1 | Step 9120 | Loss: 1.1402 | LR: 5.00e-06 +[2026-04-25 20:10:03] Epoch 1 | Step 9130 | Loss: 1.1401 | LR: 5.00e-06 +[2026-04-25 20:10:06] Epoch 1 | Step 9140 | Loss: 1.1400 | LR: 5.00e-06 +[2026-04-25 20:10:08] Epoch 1 | Step 9150 | Loss: 1.1401 | LR: 5.00e-06 +[2026-04-25 20:10:11] Epoch 1 | Step 9160 | Loss: 1.1400 | LR: 5.00e-06 +[2026-04-25 20:10:13] Epoch 1 | Step 9170 | Loss: 1.1397 | LR: 5.00e-06 +[2026-04-25 20:10:16] Epoch 1 | Step 9180 | Loss: 1.1396 | LR: 5.00e-06 +[2026-04-25 20:10:19] Epoch 1 | Step 9190 | Loss: 1.1393 | LR: 5.00e-06 +[2026-04-25 20:10:21] Epoch 1 | Step 9200 | Loss: 1.1393 | LR: 5.00e-06 +[2026-04-25 20:10:24] Epoch 1 | Step 9210 | Loss: 1.1393 | LR: 5.00e-06 +[2026-04-25 20:10:26] Epoch 1 | Step 9220 | Loss: 1.1392 | LR: 5.00e-06 +[2026-04-25 20:10:29] Epoch 1 | Step 9230 | Loss: 1.1391 | LR: 5.00e-06 +[2026-04-25 20:10:31] Epoch 1 | Step 9240 | Loss: 1.1389 | LR: 5.00e-06 +[2026-04-25 20:10:34] Epoch 1 | Step 9250 | Loss: 1.1388 | LR: 5.00e-06 +[2026-04-25 20:10:37] Epoch 1 | Step 9260 | Loss: 1.1386 | LR: 5.00e-06 +[2026-04-25 20:10:39] Epoch 1 | Step 9270 | Loss: 1.1385 | LR: 5.00e-06 +[2026-04-25 20:10:42] Epoch 1 | Step 9280 | Loss: 1.1385 | LR: 5.00e-06 +[2026-04-25 20:10:44] Epoch 1 | Step 9290 | Loss: 1.1384 | LR: 5.00e-06 +[2026-04-25 20:10:47] Epoch 1 | Step 9300 | Loss: 1.1384 | LR: 5.00e-06 +[2026-04-25 20:10:49] Epoch 1 | Step 9310 | Loss: 
1.1383 | LR: 5.00e-06 +[2026-04-25 20:10:52] Epoch 1 | Step 9320 | Loss: 1.1382 | LR: 5.00e-06 +[2026-04-25 20:10:54] Epoch 1 | Step 9330 | Loss: 1.1381 | LR: 5.00e-06 +[2026-04-25 20:10:57] Epoch 1 | Step 9340 | Loss: 1.1380 | LR: 5.00e-06 +[2026-04-25 20:10:59] Epoch 1 | Step 9350 | Loss: 1.1379 | LR: 5.00e-06 +[2026-04-25 20:11:02] Epoch 1 | Step 9360 | Loss: 1.1378 | LR: 5.00e-06 +[2026-04-25 20:11:05] Epoch 1 | Step 9370 | Loss: 1.1378 | LR: 5.00e-06 +[2026-04-25 20:11:07] Epoch 1 | Step 9380 | Loss: 1.1378 | LR: 5.00e-06 +[2026-04-25 20:11:10] Epoch 1 | Step 9390 | Loss: 1.1375 | LR: 5.00e-06 +[2026-04-25 20:11:12] Epoch 1 | Step 9400 | Loss: 1.1376 | LR: 5.00e-06 +[2026-04-25 20:11:15] Epoch 1 | Step 9410 | Loss: 1.1376 | LR: 5.00e-06 +[2026-04-25 20:11:17] Epoch 1 | Step 9420 | Loss: 1.1377 | LR: 5.00e-06 +[2026-04-25 20:11:20] Epoch 1 | Step 9430 | Loss: 1.1377 | LR: 5.00e-06 +[2026-04-25 20:11:22] Epoch 1 | Step 9440 | Loss: 1.1376 | LR: 5.00e-06 +[2026-04-25 20:11:25] Epoch 1 | Step 9450 | Loss: 1.1377 | LR: 5.00e-06 +[2026-04-25 20:11:27] Epoch 1 | Step 9460 | Loss: 1.1375 | LR: 5.00e-06 +[2026-04-25 20:11:30] Epoch 1 | Step 9470 | Loss: 1.1373 | LR: 5.00e-06 +[2026-04-25 20:11:33] Epoch 1 | Step 9480 | Loss: 1.1371 | LR: 5.00e-06 +[2026-04-25 20:11:35] Epoch 1 | Step 9490 | Loss: 1.1371 | LR: 5.00e-06 +[2026-04-25 20:11:37] Epoch 1 | Step 9500 | Loss: 1.1370 | LR: 5.00e-06 +[2026-04-25 20:11:40] Epoch 1 | Step 9510 | Loss: 1.1370 | LR: 5.00e-06 +[2026-04-25 20:11:43] Epoch 1 | Step 9520 | Loss: 1.1369 | LR: 5.00e-06 +[2026-04-25 20:11:45] Epoch 1 | Step 9530 | Loss: 1.1369 | LR: 5.00e-06 +[2026-04-25 20:11:48] Epoch 1 | Step 9540 | Loss: 1.1367 | LR: 5.00e-06 +[2026-04-25 20:11:50] Epoch 1 | Step 9550 | Loss: 1.1367 | LR: 5.00e-06 +[2026-04-25 20:11:53] Epoch 1 | Step 9560 | Loss: 1.1367 | LR: 5.00e-06 +[2026-04-25 20:11:55] Epoch 1 | Step 9570 | Loss: 1.1367 | LR: 5.00e-06 +[2026-04-25 20:11:58] Epoch 1 | Step 9580 | Loss: 1.1368 | LR: 5.00e-06 +[2026-04-25 20:12:00] Epoch 1 | Step 9590 | Loss: 1.1367 | LR: 5.00e-06 +[2026-04-25 20:12:03] Epoch 1 | Step 9600 | Loss: 1.1366 | LR: 5.00e-06 +[2026-04-25 20:12:06] Epoch 1 | Step 9610 | Loss: 1.1365 | LR: 5.00e-06 +[2026-04-25 20:12:09] Epoch 1 | Step 9620 | Loss: 1.1365 | LR: 5.00e-06 +[2026-04-25 20:12:11] Epoch 1 | Step 9630 | Loss: 1.1366 | LR: 5.00e-06 +[2026-04-25 20:12:14] Epoch 1 | Step 9640 | Loss: 1.1365 | LR: 5.00e-06 +[2026-04-25 20:12:17] Epoch 1 | Step 9650 | Loss: 1.1365 | LR: 5.00e-06 +[2026-04-25 20:12:20] Epoch 1 | Step 9660 | Loss: 1.1364 | LR: 5.00e-06 +[2026-04-25 20:12:22] Epoch 1 | Step 9670 | Loss: 1.1364 | LR: 5.00e-06 +[2026-04-25 20:12:25] Epoch 1 | Step 9680 | Loss: 1.1364 | LR: 5.00e-06 +[2026-04-25 20:12:27] Epoch 1 | Step 9690 | Loss: 1.1363 | LR: 5.00e-06 +[2026-04-25 20:12:30] Epoch 1 | Step 9700 | Loss: 1.1363 | LR: 5.00e-06 +[2026-04-25 20:12:32] Epoch 1 | Step 9710 | Loss: 1.1362 | LR: 5.00e-06 +[2026-04-25 20:12:35] Epoch 1 | Step 9720 | Loss: 1.1362 | LR: 5.00e-06 +[2026-04-25 20:12:37] Epoch 1 | Step 9730 | Loss: 1.1362 | LR: 5.00e-06 +[2026-04-25 20:12:40] Epoch 1 | Step 9740 | Loss: 1.1361 | LR: 5.00e-06 +[2026-04-25 20:12:42] Epoch 1 | Step 9750 | Loss: 1.1360 | LR: 5.00e-06 +[2026-04-25 20:12:45] Epoch 1 | Step 9760 | Loss: 1.1359 | LR: 5.00e-06 +[2026-04-25 20:12:47] Epoch 1 | Step 9770 | Loss: 1.1359 | LR: 5.00e-06 +[2026-04-25 20:12:50] Epoch 1 | Step 9780 | Loss: 1.1358 | LR: 5.00e-06 +[2026-04-25 20:12:52] Epoch 1 | Step 9790 | Loss: 1.1357 | LR: 5.00e-06 +[2026-04-25 20:12:55] Epoch 
1 | Step 9800 | Loss: 1.1357 | LR: 5.00e-06 +[2026-04-25 20:12:58] Epoch 1 | Step 9810 | Loss: 1.1355 | LR: 5.00e-06 +[2026-04-25 20:13:00] Epoch 1 | Step 9820 | Loss: 1.1354 | LR: 5.00e-06 +[2026-04-25 20:13:03] Epoch 1 | Step 9830 | Loss: 1.1354 | LR: 5.00e-06 +[2026-04-25 20:13:06] Epoch 1 | Step 9840 | Loss: 1.1355 | LR: 5.00e-06 +[2026-04-25 20:13:08] Epoch 1 | Step 9850 | Loss: 1.1354 | LR: 5.00e-06 +[2026-04-25 20:13:11] Epoch 1 | Step 9860 | Loss: 1.1352 | LR: 5.00e-06 +[2026-04-25 20:13:13] Epoch 1 | Step 9870 | Loss: 1.1353 | LR: 5.00e-06 +[2026-04-25 20:13:16] Epoch 1 | Step 9880 | Loss: 1.1353 | LR: 5.00e-06 +[2026-04-25 20:13:18] Epoch 1 completed in 2546.41s | Loss: 1.1353 +[2026-04-25 20:13:18] +Training completed! +[2026-04-25 20:13:20] Final model: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5/model_final.pt diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/requirements.txt b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f040f697230340f8a88a6e7387f7e8983d11b547 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/requirements.txt @@ -0,0 +1,245 @@ +setuptools==78.1.1 +wheel==0.45.1 +pip==25.2 +webencodings==0.5.1 +triton==3.2.0 +pytz==2025.2 +pydub==0.25.1 +pure_eval==0.2.3 +ptyprocess==0.7.0 +nvidia-ml-py==13.590.48 +nvidia-cusparselt-cu12==0.6.2 +mpmath==1.3.0 +ipython-genutils==0.2.0 +fastjsonschema==2.21.2 +brotli==1.2.0 +antlr4-python3-runtime==4.9.3 +xxhash==3.6.0 +widgetsnbextension==4.0.14 +websocket-client==1.9.0 +webcolors==24.11.1 +wcwidth==0.2.14 +urllib3==2.5.0 +uri-template==1.3.0 +tzdata==2025.2 +typing_extensions==4.15.0 +types-python-dateutil==2.9.0.20251008 +traitlets==5.14.3 +tqdm==4.67.1 +tornado==6.5.2 +tomlkit==0.13.3 +tinycss2==1.4.0 +tabulate==0.9.0 +sympy==1.13.1 +soupsieve==2.8 +sniffio==1.3.1 +smmap==5.0.2 +six==1.17.0 +shellingham==1.5.4 +Send2Trash==1.8.3 +semantic-version==2.10.0 +safetensors==0.6.2 +rpds-py==0.27.1 +rfc3986-validator==0.1.1 +regex==2025.9.18 +pyzmq==27.1.0 +PyYAML==6.0.3 +python-multipart==0.0.22 +python-json-logger==4.0.0 +python-dotenv==1.2.1 +pyparsing==3.2.5 +PyJWT==2.8.0 +Pygments==2.19.2 +pycparser==2.23 +pyarrow==22.0.0 +psutil==7.1.0 +protobuf==6.33.4 +propcache==0.4.1 +prometheus_client==0.23.1 +portalocker==3.2.0 +platformdirs==4.5.0 +pillow==11.3.0 +pexpect==4.9.0 +pathspec==1.0.4 +parso==0.8.5 +pandocfilters==1.5.1 +packaging==25.0 +orjson==3.11.6 +opt_einsum==3.4.0 +nvidia-nvtx-cu12==12.4.127 +nvidia-nvjitlink-cu12==12.4.127 +nvidia-nccl-cu12==2.21.5 +nvidia-curand-cu12==10.3.5.147 +nvidia-cufile-cu12==1.13.1.3 +nvidia-cufft-cu12==11.2.1.3 +nvidia-cuda-runtime-cu12==12.4.127 +nvidia-cuda-nvrtc-cu12==12.4.127 +nvidia-cuda-cupti-cu12==12.4.127 +nvidia-cublas-cu12==12.4.5.8 +numpy==2.3.3 +ninja==1.13.0 +networkx==3.5 +nest-asyncio==1.6.0 +narwhals==2.15.0 +mypy_extensions==1.1.0 +multidict==6.7.0 +mistune==3.1.4 +mdurl==0.1.2 +MarkupSafe==3.0.3 +lxml==6.0.2 +librt==0.8.0 +lark==1.3.0 +kiwisolver==1.4.9 +jupyterlab_widgets==3.0.15 +jupyterlab_pygments==0.3.0 +jsonpointer==3.0.0 +json5==0.12.1 +itsdangerous==2.2.0 +idna==3.10 +hf-xet==1.1.10 +h11==0.16.0 +groovy==0.1.2 +fsspec==2025.9.0 +frozenlist==1.8.0 +fqdn==1.5.1 +fonttools==4.60.1 +filelock==3.19.1 +ffmpy==1.0.0 +executing==2.2.1 +einops==0.8.1 +dill==0.4.0 +defusedxml==0.7.1 +decorator==5.2.1 +debugpy==1.8.17 +dacite==1.9.2 +cycler==0.12.1 +comm==0.2.3 +colorama==0.4.6 
+click==8.3.1 +charset-normalizer==3.4.3 +certifi==2025.10.5 +bleach==6.2.0 +babel==2.17.0 +attrs==25.4.0 +async-lru==2.0.5 +asttokens==3.0.0 +annotated-types==0.7.0 +annotated-doc==0.0.4 +aiohappyeyeballs==2.6.1 +aiofiles==24.1.0 +yarl==1.22.0 +uvicorn==0.40.0 +typing-inspection==0.4.2 +terminado==0.18.1 +stack-data==0.6.3 +sentry-sdk==2.50.0 +scipy==1.17.0 +sacrebleu==2.6.0 +rfc3987-syntax==1.1.0 +rfc3339-validator==0.1.4 +requests==2.32.5 +reportlab==4.4.9 +referencing==0.36.2 +python-dateutil==2.9.0.post0 +pydantic_core==2.41.5 +prompt_toolkit==3.0.52 +plotly==6.5.2 +pathlib2==2.3.7.post1 +orderedmultidict==1.0.2 +optree==0.17.0 +omegaconf==2.3.0 +nvidia-cusparse-cu12==12.3.1.170 +nvidia-cudnn-cu12==9.1.0.70 +mypy==1.19.1 +multiprocess==0.70.16 +matplotlib-inline==0.1.7 +markdown-it-py==4.0.0 +jupyter_core==5.8.1 +Jinja2==3.1.6 +jedi==0.19.2 +ipython_pygments_lexers==1.1.1 +httpcore==1.0.9 +gitdb==4.0.12 +ftfy==6.3.1 +contourpy==1.3.3 +cffi==2.0.0 +beautifulsoup4==4.14.2 +anyio==4.11.0 +aiosignal==1.4.0 +starlette==0.50.0 +rich==14.2.0 +pydantic==2.12.5 +pandas==2.3.3 +nvidia-cusolver-cu12==11.6.1.9 +matplotlib==3.10.7 +jupyter_server_terminals==0.5.3 +jupyter_client==8.6.3 +jsonschema-specifications==2025.9.1 +ipython==9.6.0 +hydra-core==1.3.2 +huggingface-hub==0.35.3 +httpx==0.28.1 +GitPython==3.1.46 +furl==2.1.4 +cryptography==46.0.4 +arrow==1.3.0 +argon2-cffi-bindings==25.1.0 +aiohttp==3.13.1 +wandb==0.24.0 +typer==0.21.1 +torch==2.6.0 +tokenizers==0.22.1 +seaborn==0.13.2 +safehttpx==0.1.7 +jsonschema==4.25.1 +joypy==0.2.6 +isoduration==20.11.0 +ipywidgets==8.1.7 +ipykernel==6.30.1 +gradio_client==2.0.3 +fastapi==0.128.0 +Authlib==1.6.6 +argon2-cffi==25.1.0 +transformers==4.57.6 +nbformat==5.10.4 +mlstm_kernels==2.0.2 +jupyter-console==6.6.3 +gradio==6.5.1 +datasets==4.3.0 +clearml==1.16.4 +accelerate==1.10.1 +xlstm==2.0.4 +nbclient==0.10.2 +jupyter-events==0.12.0 +trackio==0.15.0 +nbconvert==7.16.6 +jupyter_server==2.17.0 +notebook_shim==0.2.4 +jupyterlab_server==2.27.3 +jupyter-lsp==2.3.0 +nbclassic==1.3.3 +jupyterlab==4.4.9 +notebook==7.4.7 +jupyter_contrib_core==0.4.2 +jupyter==1.1.1 +jupyter_nbextensions_configurator==0.6.4 +causal-conv1d==1.5.0.post8 +flash_attn==2.7.4.post1 +mamba-ssm==2.2.4 +hnet==0.0.1 +autocommand==2.2.2 +backports.tarfile==1.2.0 +importlib_metadata==8.0.0 +inflect==7.3.1 +jaraco.collections==5.1.0 +jaraco.context==5.3.0 +jaraco.functools==4.0.1 +jaraco.text==3.12.1 +more-itertools==10.3.0 +packaging==24.2 +platformdirs==4.2.2 +tomli==2.0.1 +typeguard==4.3.0 +typing_extensions==4.12.2 +wheel==0.45.1 +zipp==3.19.2 diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/wandb-metadata.json b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..b829533f0510ed529646de880d30cc0a21d84d33 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/wandb-metadata.json @@ -0,0 +1,70 @@ +{ + "os": "Linux-5.4.0-176-generic-x86_64-with-glibc2.35", + "python": "CPython 3.12.0", + "startedAt": "2026-04-25T19:30:45.739561Z", + "args": [ + "tracking=wandb", + "tracking.project=code-completion_lr-sweep", + "tracking.run_name=pythia_1b_lr_5e-5", + "training.lr=5e-5", + "paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5", + "model=pythia_1b", + "data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full" + ], + "program": 
"/workspace/byte-llms-code/code_completion_exp/train_pythia/train.py", + "codePath": "code_completion_exp/train_pythia/train.py", + "codePathLocal": "train.py", + "git": { + "remote": "https://github.com/naryst/byte-llms-code.git", + "commit": "f111e13281aa0dc58e24302edab5b0d5c2024586" + }, + "email": "nikita@local.ru", + "root": "/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5", + "host": "7504e518d24a", + "executable": "/venv/bytellm/bin/python", + "cpu_count": 64, + "cpu_count_logical": 128, + "gpu": "NVIDIA H100 80GB HBM3", + "gpu_count": 4, + "disk": { + "/": { + "total": "265214230528", + "used": "98730414080" + } + }, + "memory": { + "total": "1081679683584" + }, + "gpu_nvidia": [ + { + "name": "NVIDIA H100 80GB HBM3", + "memoryTotal": "85520809984", + "cudaCores": 16896, + "architecture": "Hopper", + "uuid": "GPU-b60cdcab-2033-2009-41de-be646c953a20" + }, + { + "name": "NVIDIA H100 80GB HBM3", + "memoryTotal": "85520809984", + "cudaCores": 16896, + "architecture": "Hopper", + "uuid": "GPU-9982b420-4520-4238-c378-ec5a46015474" + }, + { + "name": "NVIDIA H100 80GB HBM3", + "memoryTotal": "85520809984", + "cudaCores": 16896, + "architecture": "Hopper", + "uuid": "GPU-e26ebaac-aaa6-3eed-17ab-a3dce303a76f" + }, + { + "name": "NVIDIA H100 80GB HBM3", + "memoryTotal": "85520809984", + "cudaCores": 16896, + "architecture": "Hopper", + "uuid": "GPU-9dfc6dba-0be6-4a10-1027-336cc0e65134" + } + ], + "cudaVersion": "12.2", + "writerId": "ustumeirj564la8awm2vaziyvcmzba88" +} \ No newline at end of file diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/wandb-summary.json b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..824077fa4a130c9cf27802bab66a2d9b09d5ac50 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/wandb-summary.json @@ -0,0 +1 @@ +{"_step":9880,"train/epoch":1,"epoch/loss":1.1352570770220702,"val/time":3.759830951690674,"val/perplexity":3.001287626641245,"best/val_perplexity":3.001287626641245,"train/loss":0.8197988495230675,"epoch/time":2546.4066140651703,"train/lr":5e-06,"_timestamp":1.7771479983385293e+09,"_wandb":{"runtime":2553},"val/loss":1.0774706463728632,"train/step_time":0.25243563652038575,"best/val_loss":1.0774706463728632,"train/loss_avg":1.1352740390983298,"_runtime":2553,"best/step":8000} \ No newline at end of file diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/logs/debug-core.log b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/logs/debug-core.log new file mode 100644 index 0000000000000000000000000000000000000000..6373626872d58fbfcc50f45e35116cc5d8b2ceae --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/logs/debug-core.log @@ -0,0 +1,16 @@ +{"time":"2026-04-25T19:30:45.8246501Z","level":"INFO","msg":"main: starting server","port-filename":"/tmp/tmp8z8yype5/port-108122.txt","pid":108122,"log-level":0,"disable-analytics":false,"shutdown-on-parent-exit":false,"enable-dcgm-profiling":false} +{"time":"2026-04-25T19:30:45.825628337Z","level":"INFO","msg":"server: will exit if parent process dies","ppid":108122} +{"time":"2026-04-25T19:30:45.825607778Z","level":"INFO","msg":"server: accepting connections","addr":{"Name":"/tmp/wandb-108122-108184-1418996074/socket","Net":"unix"}} +{"time":"2026-04-25T19:30:46.013214659Z","level":"INFO","msg":"connection: ManageConnectionData: new connection created","id":"1(@)"} 
+{"time":"2026-04-25T19:30:46.03786202Z","level":"INFO","msg":"handleInformInit: received","streamId":"vg3if73m","id":"1(@)"} +{"time":"2026-04-25T19:30:46.577119753Z","level":"INFO","msg":"handleInformInit: stream started","streamId":"vg3if73m","id":"1(@)"} +{"time":"2026-04-25T20:13:21.759412864Z","level":"INFO","msg":"handleInformFinish: finish message received","streamId":"vg3if73m","id":"1(@)"} +{"time":"2026-04-25T20:13:21.759969683Z","level":"INFO","msg":"handleInformFinish: stream closed","streamId":"vg3if73m","id":"1(@)"} +{"time":"2026-04-25T20:13:21.778021099Z","level":"INFO","msg":"handleInformTeardown: server teardown initiated","id":"1(@)"} +{"time":"2026-04-25T20:13:21.778053018Z","level":"INFO","msg":"handleInformTeardown: server shutdown complete","id":"1(@)"} +{"time":"2026-04-25T20:13:21.778058724Z","level":"INFO","msg":"server is shutting down"} +{"time":"2026-04-25T20:13:21.778064728Z","level":"INFO","msg":"connection: closing","id":"1(@)"} +{"time":"2026-04-25T20:13:21.778117411Z","level":"INFO","msg":"server: listener closed","addr":{"Name":"/tmp/wandb-108122-108184-1418996074/socket","Net":"unix"}} +{"time":"2026-04-25T20:13:21.778156925Z","level":"INFO","msg":"connection: closed successfully","id":"1(@)"} +{"time":"2026-04-25T20:13:21.778163347Z","level":"INFO","msg":"connection: ManageConnectionData: connection closed","id":"1(@)"} +{"time":"2026-04-25T20:13:21.778168935Z","level":"INFO","msg":"server is closed"} diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/logs/debug-internal.log b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..a1276ba167adb0b0b26f4b43aa2552cc3d90da0b --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/logs/debug-internal.log @@ -0,0 +1,13 @@ +{"time":"2026-04-25T19:30:46.037958475Z","level":"INFO","msg":"stream: starting","core version":"0.24.0"} +{"time":"2026-04-25T19:30:46.57692305Z","level":"INFO","msg":"stream: created new stream","id":"vg3if73m"} +{"time":"2026-04-25T19:30:46.576990227Z","level":"INFO","msg":"handler: started","stream_id":"vg3if73m"} +{"time":"2026-04-25T19:30:46.577113222Z","level":"INFO","msg":"stream: started","id":"vg3if73m"} +{"time":"2026-04-25T19:30:46.577131984Z","level":"INFO","msg":"sender: started","stream_id":"vg3if73m"} +{"time":"2026-04-25T19:30:46.577130838Z","level":"INFO","msg":"writer: started","stream_id":"vg3if73m"} +{"time":"2026-04-25T19:30:46.758521679Z","level":"ERROR","msg":"git repo not found","error":"repository does not exist"} +{"time":"2026-04-25T20:13:21.644248736Z","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"} +{"time":"2026-04-25T20:13:21.756609599Z","level":"INFO","msg":"handler: operation stats","stats":{}} +{"time":"2026-04-25T20:13:21.759436838Z","level":"INFO","msg":"stream: closing","id":"vg3if73m"} +{"time":"2026-04-25T20:13:21.759447359Z","level":"INFO","msg":"handler: closed","stream_id":"vg3if73m"} +{"time":"2026-04-25T20:13:21.759544665Z","level":"INFO","msg":"sender: closed","stream_id":"vg3if73m"} +{"time":"2026-04-25T20:13:21.759549885Z","level":"INFO","msg":"stream: closed","id":"vg3if73m"} diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/logs/debug.log b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..0491c6a67e1b9682511602a186734db8ab3a1eaa --- /dev/null +++ 
b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/logs/debug.log @@ -0,0 +1,24 @@ +2026-04-25 19:30:45,740 INFO MainThread:108122 [wandb_setup.py:_flush():81] Current SDK version is 0.24.0 +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_setup.py:_flush():81] Configure stats pid to 108122 +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_setup.py:_flush():81] Loading settings from environment variables +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_init.py:setup_run_log_directory():717] Logging user logs to /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/logs/debug.log +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_init.py:setup_run_log_directory():718] Logging internal logs to /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/logs/debug-internal.log +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_init.py:init():844] calling init triggers +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_init.py:init():849] wandb.init called with sweep_config: {} +config: {'model': {'name': 'EleutherAI/pythia-1b', 'checkpoint_path': None, 'from_scratch': False}, 'training': {'epochs': 1, 'batch_size': 4, 'eval_batch_size': 12, 'gradient_accumulation_steps': 4, 'lr': 5e-05, 'weight_decay': 0.1, 'betas': [0.9, 0.95], 'eps': 1e-08, 'lr_scheduler': 'wsd', 'warmup_ratio': 0.1, 'decay_ratio': 0.2, 'warmup_steps': 100, 'min_lr_ratio': 0.1, 'max_grad_norm': 1.0, 'use_amp': True, 'resume': False, 'resume_checkpoint': None}, 'data': {'path': '/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full', 'max_context_len': 4096, 'max_target_len': 256, 'num_workers': 4, 'pin_memory': True, 'max_train_samples': None, 'max_val_samples': 2000}, 'logging': {'log_interval': 10, 'save_interval': 0, 'eval_interval': 2000, 'save_every_epoch': False}, 'tracking': {'enabled': True, 'backend': 'wandb', 'project': 'code-completion_lr-sweep', 'run_name': 'pythia_1b_lr_5e-5', 'entity': None, 'base_url': 'https://wandb.platun0v.ru', 'local_dir': '/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5'}, 'paths': {'output_dir': '/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5'}, 'seed': 42, 'device': 'cuda', '_wandb': {'code_path': 'code/code_completion_exp/train_pythia/train.py'}} +2026-04-25 19:30:45,741 INFO MainThread:108122 [wandb_init.py:init():892] starting backend +2026-04-25 19:30:46,013 INFO MainThread:108122 [wandb_init.py:init():895] sending inform_init request +2026-04-25 19:30:46,036 INFO MainThread:108122 [wandb_init.py:init():903] backend started and connected +2026-04-25 19:30:46,039 INFO MainThread:108122 [wandb_init.py:init():973] updated telemetry +2026-04-25 19:30:46,057 INFO MainThread:108122 [wandb_init.py:init():997] communicating run to backend with 90.0 second timeout +2026-04-25 19:30:46,757 INFO MainThread:108122 [wandb_init.py:init():1044] starting run threads in backend +2026-04-25 19:30:46,918 INFO MainThread:108122 [wandb_run.py:_console_start():2529] atexit reg +2026-04-25 19:30:46,918 INFO MainThread:108122 [wandb_run.py:_redirect():2377] redirect: wrap_raw +2026-04-25 19:30:46,918 INFO MainThread:108122 [wandb_run.py:_redirect():2446] Wrapping output streams. +2026-04-25 19:30:46,918 INFO MainThread:108122 [wandb_run.py:_redirect():2469] Redirects installed. 
+2026-04-25 19:30:46,921 INFO MainThread:108122 [wandb_init.py:init():1084] run started, returning control to user process +2026-04-25 20:13:20,732 INFO MainThread:108122 [wandb_run.py:_finish():2295] finishing run nikita/code-completion_lr-sweep/vg3if73m +2026-04-25 20:13:20,733 INFO MainThread:108122 [wandb_run.py:_atexit_cleanup():2494] got exitcode: 0 +2026-04-25 20:13:20,733 INFO MainThread:108122 [wandb_run.py:_restore():2476] restore +2026-04-25 20:13:20,733 INFO MainThread:108122 [wandb_run.py:_restore():2482] restore done +2026-04-25 20:13:21,758 INFO MainThread:108122 [wandb_run.py:_footer_sync_info():3870] logging synced files diff --git a/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/run-vg3if73m.wandb b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/run-vg3if73m.wandb new file mode 100644 index 0000000000000000000000000000000000000000..8727204332a98f782e73f0e124f37e9111772947 --- /dev/null +++ b/lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/run-vg3if73m.wandb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5d764714cf63c8157e718fa5dbe97499c6d9c22fa95b783249f74e08fdb1f64 +size 1257774
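Each run added by this sweep commits a wandb-summary.json with keys such as "best/val_loss", "best/val_perplexity", "best/step", and "train/lr" (the summary for pythia_1b_lr_5e-5 appears above). The snippet below is a minimal, illustrative sketch for collecting those summaries and ranking the sweep runs; it is not a script that exists in this repository, the directory layout and glob pattern are assumptions based only on the paths visible in this diff, and the logged "train/lr" is the final (already decayed) learning rate rather than the nominal sweep setting.

```python
import json
from pathlib import Path

# Assumed sweep layout, mirroring the paths committed in this diff, e.g.
# lr_sweep/pythia_1b_lr_5e-5/wandb/run-20260425_193045-vg3if73m/files/wandb-summary.json
sweep_root = Path("lr_sweep")

rows = []
for summary_path in sorted(sweep_root.glob("*/wandb/run-*/files/wandb-summary.json")):
    run_name = summary_path.relative_to(sweep_root).parts[0]  # e.g. "pythia_1b_lr_5e-5"
    summary = json.loads(summary_path.read_text())
    rows.append((
        run_name,
        summary.get("best/val_loss"),
        summary.get("best/val_perplexity"),
        summary.get("best/step"),
        summary.get("train/lr"),  # last logged LR (decayed), not the sweep's nominal LR
    ))

# Rank runs by best validation loss; runs without a recorded value sort last.
rows.sort(key=lambda r: (r[1] is None, r[1]))

print(f"{'run':<24} {'best val loss':>14} {'best val PPL':>13} {'best step':>10} {'final lr':>10}")
for run_name, val_loss, ppl, step, lr in rows:
    print(f"{run_name:<24} {val_loss!s:>14} {ppl!s:>13} {step!s:>10} {lr!s:>10}")
```

Run from the repository root, this prints one row per tracked run, so the lowest best validation loss across the swept learning rates is visible at a glance; adjust sweep_root or the glob pattern if the outputs live elsewhere.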