{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "608b26fa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# !sudo apt install libopenmpi-dev -y\n",
    "# !pip3 install mpi4py --user\n",
    "# !pip3 install deepspeed -U --user"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "607440e1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# !pip3 install accelerate transformers -U --user"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "2cdb0765",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "absl-py==2.0.0\r\n",
      "accelerate==0.25.0\r\n",
      "aiofiles==23.2.1\r\n",
      "aiohttp==3.8.5\r\n",
      "aiohttp-cors==0.7.0\r\n",
      "aiorwlock==1.3.0\r\n",
      "aiosignal==1.3.1\r\n",
      "altair==5.1.2\r\n",
      "anyio==3.7.1\r\n",
      "appdirs==1.4.4\r\n",
      "argon2-cffi==23.1.0\r\n",
      "argon2-cffi-bindings==21.2.0\r\n",
      "asttokens==2.2.1\r\n",
      "async-timeout==4.0.3\r\n",
      "attributedict==0.3.0\r\n",
      "attrs==23.1.0\r\n",
      "autoawq @ https://github.com/casper-hansen/AutoAWQ/releases/download/v0.1.6/autoawq-0.1.6+cu118-cp310-cp310-linux_x86_64.whl\r\n",
      "azure-core==1.29.5\r\n",
      "azure-identity==1.15.0\r\n",
      "azure-storage-blob==12.18.3\r\n",
      "azure-storage-file-datalake==12.13.2\r\n",
      "backcall==0.2.0\r\n",
      "bcrypt==4.0.1\r\n",
      "beautifulsoup4==4.12.2\r\n",
      "bitsandbytes==0.41.0\r\n",
      "bleach==6.0.0\r\n",
      "blessed==1.20.0\r\n",
      "blessings==1.7\r\n",
      "boto3==1.28.78\r\n",
      "botocore==1.31.78\r\n",
      "Brotli==1.1.0\r\n",
      "cachetools==5.3.2\r\n",
      "causal-conv1d==1.0.0\r\n",
      "certifi==2022.12.7\r\n",
      "cffi==1.15.1\r\n",
      "chardet==5.2.0\r\n",
      "charset-normalizer==2.1.1\r\n",
      "circuitbreaker==1.4.0\r\n",
      "click==8.1.7\r\n",
      "cmake==3.27.7\r\n",
      "codecov==2.1.13\r\n",
      "colorama==0.4.6\r\n",
      "coloredlogs==15.0.1\r\n",
      "colorful==0.5.5\r\n",
      "colour-runner==0.1.1\r\n",
      "comm==0.1.4\r\n",
      "contourpy==1.2.0\r\n",
      "coverage==7.3.2\r\n",
      "cryptography==41.0.5\r\n",
      "cycler==0.12.1\r\n",
      "DataProperty==1.0.1\r\n",
      "datasets==2.14.6\r\n",
      "debugpy==1.6.7.post1\r\n",
      "decorator==5.1.1\r\n",
      "deepdiff==6.6.1\r\n",
      "deepspeed==0.12.4\r\n",
      "defusedxml==0.7.1\r\n",
      "dill==0.3.7\r\n",
      "distlib==0.3.7\r\n",
      "distro==1.7.0\r\n",
      "docker-pycreds==0.4.0\r\n",
      "einops==0.6.1\r\n",
      "einops-exts==0.0.4\r\n",
      "evaluate==0.4.1\r\n",
      "exceptiongroup==1.1.3\r\n",
      "executing==1.2.0\r\n",
      "fastapi==0.104.1\r\n",
      "fastjsonschema==2.18.0\r\n",
      "ffmpy==0.3.1\r\n",
      "filelock==3.13.1\r\n",
      "flash-attn==2.3.0\r\n",
      "fonttools==4.44.0\r\n",
      "frozenlist==1.4.0\r\n",
      "fsspec==2023.10.0\r\n",
      "gitdb==4.0.11\r\n",
      "GitPython==3.1.40\r\n",
      "google-api-core==2.12.0\r\n",
      "google-auth==2.23.4\r\n",
      "google-cloud-core==2.3.3\r\n",
      "google-cloud-storage==2.10.0\r\n",
      "google-crc32c==1.5.0\r\n",
      "google-resumable-media==2.6.0\r\n",
      "googleapis-common-protos==1.61.0\r\n",
      "gpustat==1.1.1\r\n",
      "gradio==3.35.2\r\n",
      "gradio_client==0.2.9\r\n",
      "grpcio==1.59.2\r\n",
      "h11==0.14.0\r\n",
      "hjson==3.1.0\r\n",
      "httpcore==0.17.3\r\n",
      "httptools==0.6.1\r\n",
      "httpx==0.24.0\r\n",
      "huggingface-hub==0.17.3\r\n",
      "humanfriendly==10.0\r\n",
      "idna==3.4\r\n",
      "inspecta==0.1.3\r\n",
      "ipykernel==6.25.1\r\n",
      "ipython==8.14.0\r\n",
      "ipython-genutils==0.2.0\r\n",
      "ipywidgets==8.1.0\r\n",
      "isodate==0.6.1\r\n",
      "jedi==0.19.0\r\n",
      "Jinja2==3.1.2\r\n",
      "jmespath==1.0.1\r\n",
      "joblib==1.3.2\r\n",
      "jsonlines==4.0.0\r\n",
      "jsonschema==4.19.0\r\n",
      "jsonschema-specifications==2023.7.1\r\n",
      "jupyter==1.0.0\r\n",
      "jupyter-console==6.6.3\r\n",
      "jupyter-server==1.18.0\r\n",
      "jupyter-server-proxy==3.2.1\r\n",
      "jupyter_client==8.3.0\r\n",
      "jupyter_core==5.3.1\r\n",
      "jupyterlab-pygments==0.2.2\r\n",
      "jupyterlab-widgets==3.0.8\r\n",
      "kiwisolver==1.4.5\r\n",
      "linkify-it-py==2.0.2\r\n",
      "lit==17.0.4\r\n",
      "# Editable install with no version control (llava==1.1.3)\r\n",
      "-e /home/ubuntu/LLaVA\r\n",
      "lm-eval==0.3.0\r\n",
      "mamba-ssm @ file:///home/ubuntu/mamba\r\n",
      "markdown-it-py==2.2.0\r\n",
      "markdown2==2.4.10\r\n",
      "MarkupSafe==2.1.3\r\n",
      "matplotlib==3.8.1\r\n",
      "matplotlib-inline==0.1.6\r\n",
      "mbstrdecoder==1.1.3\r\n",
      "mdit-py-plugins==0.3.3\r\n",
      "mdurl==0.1.2\r\n",
      "mistune==3.0.1\r\n",
      "mosaicml-streaming==0.6.1\r\n",
      "mp==0.5.0\r\n",
      "mpi4py==3.1.5\r\n",
      "mpmath==1.3.0\r\n",
      "msal==1.25.0\r\n",
      "msal-extensions==1.0.0\r\n",
      "msgpack==1.0.7\r\n",
      "msgspec==0.18.4\r\n",
      "multidict==6.0.4\r\n",
      "multiprocess==0.70.15\r\n",
      "nbclient==0.8.0\r\n",
      "nbconvert==7.7.4\r\n",
      "nbformat==5.9.2\r\n",
      "nest-asyncio==1.5.7\r\n",
      "networkx==3.0\r\n",
      "ninja==1.11.1.1\r\n",
      "nltk==3.8.1\r\n",
      "notebook==6.4.12\r\n",
      "numexpr==2.8.7\r\n",
      "numpy==1.24.1\r\n",
      "nvidia-cublas-cu11==11.10.3.66\r\n",
      "nvidia-cuda-cupti-cu11==11.7.101\r\n",
      "nvidia-cuda-nvrtc-cu11==11.7.99\r\n",
      "nvidia-cuda-runtime-cu11==11.7.99\r\n",
      "nvidia-cudnn-cu11==8.5.0.96\r\n",
      "nvidia-cufft-cu11==10.9.0.58\r\n",
      "nvidia-curand-cu11==10.2.10.91\r\n",
      "nvidia-cusolver-cu11==11.4.0.1\r\n",
      "nvidia-cusparse-cu11==11.7.4.91\r\n",
      "nvidia-ml-py==12.535.133\r\n",
      "nvidia-nccl-cu11==2.14.3\r\n",
      "nvidia-nvtx-cu11==11.7.91\r\n",
      "oci==2.115.0\r\n",
      "openai==0.28.0\r\n",
      "opencensus==0.11.3\r\n",
      "opencensus-context==0.1.3\r\n",
      "ordered-set==4.1.0\r\n",
      "orjson==3.9.10\r\n",
      "packaging==23.1\r\n",
      "pandas==2.1.2\r\n",
      "pandocfilters==1.5.0\r\n",
      "paramiko==3.3.1\r\n",
      "parso==0.8.3\r\n",
      "pathvalidate==3.2.0\r\n",
      "peft==0.4.0\r\n",
      "pexpect==4.8.0\r\n",
      "pickleshare==0.7.5\r\n",
      "Pillow==9.3.0\r\n",
      "platformdirs==3.10.0\r\n",
      "pluggy==1.3.0\r\n",
      "portalocker==2.8.2\r\n",
      "prometheus-client==0.17.1\r\n",
      "prompt-toolkit==3.0.39\r\n",
      "protobuf==4.25.0\r\n",
      "psutil==5.9.5\r\n",
      "ptyprocess==0.7.0\r\n",
      "pure-eval==0.2.2\r\n",
      "py-cpuinfo==9.0.0\r\n",
      "py-spy==0.3.14\r\n",
      "pyarrow==14.0.0\r\n",
      "pyasn1==0.5.0\r\n",
      "pyasn1-modules==0.3.0\r\n",
      "pybind11==2.11.1\r\n",
      "pycountry==22.3.5\r\n",
      "pycparser==2.21\r\n",
      "pydantic==1.10.13\r\n",
      "pydub==0.25.1\r\n",
      "Pygments==2.16.1\r\n",
      "PyJWT==2.8.0\r\n",
      "PyNaCl==1.5.0\r\n",
      "pynvml==11.5.0\r\n",
      "pyOpenSSL==23.3.0\r\n",
      "pyparsing==3.1.1\r\n",
      "pyproject-api==1.6.1\r\n",
      "pytablewriter==1.2.0\r\n",
      "python-dateutil==2.8.2\r\n",
      "python-dotenv==1.0.0\r\n",
      "python-multipart==0.0.6\r\n",
      "python-snappy==0.6.1\r\n",
      "pytz==2023.3.post1\r\n",
      "PyYAML==6.0.1\r\n",
      "pyzmq==25.1.1\r\n",
      "qtconsole==5.4.3\r\n",
      "QtPy==2.3.1\r\n",
      "ray==2.8.0\r\n",
      "referencing==0.30.2\r\n",
      "regex==2023.10.3\r\n",
      "requests==2.28.1\r\n",
      "responses==0.18.0\r\n",
      "rich==13.6.0\r\n",
      "rootpath==0.1.1\r\n",
      "rouge-score==0.1.2\r\n",
      "rpds-py==0.9.2\r\n",
      "rsa==4.9\r\n",
      "s3transfer==0.7.0\r\n",
      "sacrebleu==1.5.0\r\n",
      "safetensors==0.4.0\r\n",
      "scikit-learn==1.2.2\r\n",
      "scipy==1.11.3\r\n",
      "semantic-version==2.10.0\r\n",
      "Send2Trash==1.8.2\r\n",
      "sentencepiece==0.1.99\r\n",
      "sentry-sdk==1.36.0\r\n",
      "setproctitle==1.3.3\r\n",
      "shortuuid==1.0.11\r\n",
      "simpervisor==1.0.0\r\n",
      "six==1.16.0\r\n",
      "smart-open==6.4.0\r\n",
      "smmap==5.0.1\r\n",
      "sniffio==1.3.0\r\n",
      "soupsieve==2.4.1\r\n",
      "sqlitedict==2.1.0\r\n",
      "ssh-import-id==5.11\r\n",
      "stack-data==0.6.2\r\n",
      "starlette==0.27.0\r\n",
      "svgwrite==1.4.3\r\n",
      "sympy==1.12\r\n",
      "tabledata==1.3.3\r\n",
      "tabulate==0.9.0\r\n",
      "tcolorpy==0.1.4\r\n",
      "tensorboardX==2.6.2.2\r\n",
      "termcolor==2.3.0\r\n",
      "terminado==0.17.1\r\n",
      "texttable==1.7.0\r\n",
      "threadpoolctl==3.2.0\r\n",
      "timm==0.6.13\r\n",
      "tinycss2==1.2.1\r\n",
      "tokenizers==0.14.1\r\n",
      "toml==0.10.2\r\n",
      "tomli==2.0.1\r\n",
      "toolz==0.12.0\r\n",
      "torch==2.0.1+cu118\r\n",
      "torchaudio==2.0.2+cu118\r\n",
      "torchvision==0.15.2+cu118\r\n",
      "tornado==6.3.3\r\n",
      "tox==4.11.3\r\n",
      "tqdm==4.66.1\r\n",
      "tqdm-multiprocess==0.0.11\r\n",
      "traitlets==5.9.0\r\n",
      "transformers==4.35.2\r\n",
      "triton==2.0.0\r\n",
      "typepy==1.3.2\r\n",
      "typing_extensions==4.8.0\r\n",
      "tzdata==2023.3\r\n",
      "uc-micro-py==1.0.2\r\n",
      "unzip==1.0.0\r\n",
      "urllib3==1.26.13\r\n",
      "uvicorn==0.24.0.post1\r\n",
      "uvloop==0.19.0\r\n",
      "virtualenv==20.21.0\r\n",
      "wandb==0.16.0\r\n",
      "watchfiles==0.21.0\r\n",
      "wavedrom==2.0.3.post3\r\n",
      "wcwidth==0.2.6\r\n",
      "webencodings==0.5.1\r\n",
      "websocket-client==1.6.1\r\n",
      "websockets==12.0\r\n",
      "widgetsnbextension==4.0.8\r\n",
      "xxhash==3.4.1\r\n",
      "yarl==1.9.2\r\n",
      "zstandard==0.22.0\r\n",
      "zstd==1.5.5.1\r\n"
     ]
    }
   ],
   "source": [
    "!pip3 freeze"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "ecf642c4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Tue Dec  5 08:09:23 2023       \r\n",
      "+-----------------------------------------------------------------------------+\r\n",
      "| NVIDIA-SMI 525.85.12    Driver Version: 525.85.12    CUDA Version: 12.0     |\r\n",
      "|-------------------------------+----------------------+----------------------+\r\n",
      "| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\r\n",
      "| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\r\n",
      "|                               |                      |               MIG M. |\r\n",
      "|===============================+======================+======================|\r\n",
      "|   0  NVIDIA A100 80G...  On   | 00000001:00:00.0 Off |                    0 |\r\n",
      "| N/A   44C    P0    66W / 300W |  28802MiB / 81920MiB |      0%      Default |\r\n",
      "|                               |                      |             Disabled |\r\n",
      "+-------------------------------+----------------------+----------------------+\r\n",
      "                                                                               \r\n",
      "+-----------------------------------------------------------------------------+\r\n",
      "| Processes:                                                                  |\r\n",
      "|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |\r\n",
      "|        ID   ID                                                   Usage      |\r\n",
      "|=============================================================================|\r\n",
      "+-----------------------------------------------------------------------------+\r\n"
     ]
    }
   ],
   "source": [
    "!nvidia-smi"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "1a581ce5",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.environ[\"WANDB_DISABLED\"] = \"true\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "9b310aa0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Copyright (c) 2023, Albert Gu, Tri Dao.\n",
    "\n",
    "import math\n",
    "from functools import partial\n",
    "\n",
    "from collections import namedtuple\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.nn import CrossEntropyLoss\n",
    "from transformers import PretrainedConfig\n",
    "\n",
    "from mamba_ssm.modules.mamba_simple import Mamba, Block\n",
    "from mamba_ssm.utils.generation import GenerationMixin\n",
    "from mamba_ssm.utils.hf import load_config_hf, load_state_dict_hf\n",
    "\n",
    "try:\n",
    "    from mamba_ssm.ops.triton.layernorm import RMSNorm, layer_norm_fn, rms_norm_fn\n",
    "except ImportError:\n",
    "    RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None\n",
    "\n",
    "\n",
    "def create_block(\n",
    "    d_model,\n",
    "    ssm_cfg=None,\n",
    "    norm_epsilon=1e-5,\n",
    "    rms_norm=False,\n",
    "    residual_in_fp32=False,\n",
    "    fused_add_norm=False,\n",
    "    layer_idx=None,\n",
    "    device=None,\n",
    "    dtype=None,\n",
    "):\n",
    "    \"\"\"Build one Mamba residual block together with its norm layer.\n",
    "\n",
    "    The mixer and norm are passed to Block as partially-applied classes so\n",
    "    Block can instantiate them itself; layer_idx is also stamped onto the\n",
    "    block for inference-cache bookkeeping.\n",
    "    \"\"\"\n",
    "    factory_kwargs = {\"device\": device, \"dtype\": dtype}\n",
    "    ssm_kwargs = {} if ssm_cfg is None else ssm_cfg\n",
    "    mixer_cls = partial(Mamba, layer_idx=layer_idx, **ssm_kwargs, **factory_kwargs)\n",
    "    norm_layer = RMSNorm if rms_norm else nn.LayerNorm\n",
    "    norm_cls = partial(norm_layer, eps=norm_epsilon, **factory_kwargs)\n",
    "    block = Block(\n",
    "        d_model,\n",
    "        mixer_cls,\n",
    "        norm_cls=norm_cls,\n",
    "        fused_add_norm=fused_add_norm,\n",
    "        residual_in_fp32=residual_in_fp32,\n",
    "    )\n",
    "    block.layer_idx = layer_idx\n",
    "    return block\n",
    "\n",
    "\n",
    "# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454\n",
    "def _init_weights(\n",
    "    module,\n",
    "    n_layer,\n",
    "    initializer_range=0.02,  # Now only used for embedding layer.\n",
    "    rescale_prenorm_residual=True,\n",
    "    n_residuals_per_layer=1,  # Change to 2 if we have MLP\n",
    "):\n",
    "    if isinstance(module, nn.Linear):\n",
    "        if module.bias is not None:\n",
    "            if not getattr(module.bias, \"_no_reinit\", False):\n",
    "                nn.init.zeros_(module.bias)\n",
    "    elif isinstance(module, nn.Embedding):\n",
    "        nn.init.normal_(module.weight, std=initializer_range)\n",
    "\n",
    "    if rescale_prenorm_residual:\n",
    "        # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:\n",
    "        #   > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale\n",
    "        #   > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.\n",
    "        #   >   -- GPT-2 :: https://openai.com/blog/better-language-models/\n",
    "        #\n",
    "        # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py\n",
    "        for name, p in module.named_parameters():\n",
    "            if name in [\"out_proj.weight\", \"fc2.weight\"]:\n",
    "                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block\n",
    "                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)\n",
    "                # We need to reinit p since this code could be called multiple times\n",
    "                # Having just p *= scale would repeatedly scale it down\n",
    "                nn.init.kaiming_uniform_(p, a=math.sqrt(5))\n",
    "                with torch.no_grad():\n",
    "                    p /= math.sqrt(n_residuals_per_layer * n_layer)\n",
    "\n",
    "\n",
    "class MixerModel(nn.Module):\n",
    "    \"\"\"Token embedding -> stack of Mamba blocks -> final (RMS)LayerNorm.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        d_model: int,\n",
    "        n_layer: int,\n",
    "        vocab_size: int,\n",
    "        ssm_cfg=None,\n",
    "        norm_epsilon: float = 1e-5,\n",
    "        rms_norm: bool = False,\n",
    "        initializer_cfg=None,\n",
    "        fused_add_norm=False,\n",
    "        residual_in_fp32=False,\n",
    "        device=None,\n",
    "        dtype=None,\n",
    "    ) -> None:\n",
    "        factory_kwargs = {\"device\": device, \"dtype\": dtype}\n",
    "        super().__init__()\n",
    "        self.residual_in_fp32 = residual_in_fp32\n",
    "\n",
    "        self.embedding = nn.Embedding(vocab_size, d_model, **factory_kwargs)\n",
    "\n",
    "        # Blocks compute Add -> LN -> Mixer (instead of LN -> Mixer -> Add)\n",
    "        # and return both the residual and main branches, so that the add\n",
    "        # and layer_norm can be fused for speed. Model semantics unchanged.\n",
    "        self.fused_add_norm = fused_add_norm\n",
    "        if self.fused_add_norm and (layer_norm_fn is None or rms_norm_fn is None):\n",
    "            raise ImportError(\"Failed to import Triton LayerNorm / RMSNorm kernels\")\n",
    "\n",
    "        self.layers = nn.ModuleList(\n",
    "            [\n",
    "                create_block(\n",
    "                    d_model,\n",
    "                    ssm_cfg=ssm_cfg,\n",
    "                    norm_epsilon=norm_epsilon,\n",
    "                    rms_norm=rms_norm,\n",
    "                    residual_in_fp32=residual_in_fp32,\n",
    "                    fused_add_norm=fused_add_norm,\n",
    "                    layer_idx=block_idx,\n",
    "                    **factory_kwargs,\n",
    "                )\n",
    "                for block_idx in range(n_layer)\n",
    "            ]\n",
    "        )\n",
    "\n",
    "        norm_layer = RMSNorm if rms_norm else nn.LayerNorm\n",
    "        self.norm_f = norm_layer(d_model, eps=norm_epsilon, **factory_kwargs)\n",
    "\n",
    "        self.apply(\n",
    "            partial(\n",
    "                _init_weights,\n",
    "                n_layer=n_layer,\n",
    "                **(initializer_cfg if initializer_cfg is not None else {}),\n",
    "            )\n",
    "        )\n",
    "\n",
    "    def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):\n",
    "        \"\"\"Return per-layer inference caches, keyed by layer index.\"\"\"\n",
    "        caches = {}\n",
    "        for idx, layer in enumerate(self.layers):\n",
    "            caches[idx] = layer.allocate_inference_cache(\n",
    "                batch_size, max_seqlen, dtype=dtype, **kwargs\n",
    "            )\n",
    "        return caches\n",
    "\n",
    "    def forward(self, input_ids, inference_params=None):\n",
    "        hidden_states = self.embedding(input_ids)\n",
    "        residual = None\n",
    "        for layer in self.layers:\n",
    "            hidden_states, residual = layer(\n",
    "                hidden_states, residual, inference_params=inference_params\n",
    "            )\n",
    "        if self.fused_add_norm:\n",
    "            # prenorm=False: the final residual stream is not needed here.\n",
    "            norm_fn = rms_norm_fn if isinstance(self.norm_f, RMSNorm) else layer_norm_fn\n",
    "            return norm_fn(\n",
    "                hidden_states,\n",
    "                self.norm_f.weight,\n",
    "                self.norm_f.bias,\n",
    "                eps=self.norm_f.eps,\n",
    "                residual=residual,\n",
    "                prenorm=False,\n",
    "                residual_in_fp32=self.residual_in_fp32,\n",
    "            )\n",
    "        residual = hidden_states if residual is None else hidden_states + residual\n",
    "        return self.norm_f(residual.to(dtype=self.norm_f.weight.dtype))\n",
    "\n",
    "\n",
    "class MambaLMHeadModel(nn.Module, GenerationMixin):\n",
    "    \"\"\"Mamba backbone plus a weight-tied language-model head.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        d_model: int,\n",
    "        n_layer: int,\n",
    "        vocab_size: int,\n",
    "        initializer_cfg=None,\n",
    "        pad_vocab_size_multiple: int = 1,\n",
    "        device=None,\n",
    "        dtype=None,\n",
    "        **backbone_kwargs,\n",
    "    ) -> None:\n",
    "        factory_kwargs = {\"device\": device, \"dtype\": dtype}\n",
    "        super().__init__()\n",
    "        # Pad vocab up to a multiple (helps kernel/tensor-core efficiency).\n",
    "        if vocab_size % pad_vocab_size_multiple != 0:\n",
    "            vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)\n",
    "        self.backbone = MixerModel(\n",
    "            d_model=d_model,\n",
    "            n_layer=n_layer,\n",
    "            vocab_size=vocab_size,\n",
    "            initializer_cfg=initializer_cfg,\n",
    "            **backbone_kwargs,\n",
    "            **factory_kwargs,\n",
    "        )\n",
    "        self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)\n",
    "\n",
    "        # Initialize weights and apply final processing\n",
    "        self.apply(\n",
    "            partial(\n",
    "                _init_weights,\n",
    "                n_layer=n_layer,\n",
    "                **(initializer_cfg if initializer_cfg is not None else {}),\n",
    "            )\n",
    "        )\n",
    "        self.tie_weights()\n",
    "        # Minimal HF-style config so Trainer & friends can introspect the model\n",
    "        # (e.g. forward() reads self.config.vocab_size for the loss reshape).\n",
    "        self.config = PretrainedConfig(\n",
    "            d_model=d_model,\n",
    "            n_layer=n_layer,\n",
    "            vocab_size=vocab_size,\n",
    "            hidden_size=d_model,\n",
    "        )\n",
    "\n",
    "    def tie_weights(self):\n",
    "        # Share the embedding matrix with the LM head (weight tying).\n",
    "        self.lm_head.weight = self.backbone.embedding.weight\n",
    "\n",
    "    def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):\n",
    "        return self.backbone.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs)\n",
    "\n",
    "    def forward(self, input_ids, position_ids=None, inference_params=None, num_last_tokens=0, labels=None):\n",
    "        \"\"\"\n",
    "        \"position_ids\" is just to be compatible with Transformer generation. We don't use it.\n",
    "        num_last_tokens: if > 0, only return the logits for the last n tokens\n",
    "        Returns (loss,) when labels is given, else a namedtuple with .logits.\n",
    "        \"\"\"\n",
    "        hidden_states = self.backbone(input_ids, inference_params=inference_params)\n",
    "        if num_last_tokens > 0:\n",
    "            hidden_states = hidden_states[:, -num_last_tokens:]\n",
    "        lm_logits = self.lm_head(hidden_states)\n",
    "\n",
    "        if labels is not None:\n",
    "            # Shift so that tokens < n predict n\n",
    "            shift_logits = lm_logits[..., :-1, :].contiguous()\n",
    "            shift_labels = labels[..., 1:].contiguous()\n",
    "            # Flatten the tokens\n",
    "            loss_fct = CrossEntropyLoss()\n",
    "            shift_logits = shift_logits.view(-1, self.config.vocab_size)\n",
    "            shift_labels = shift_labels.view(-1)\n",
    "            # Enable model parallelism\n",
    "            shift_labels = shift_labels.to(shift_logits.device)\n",
    "            loss = loss_fct(shift_logits, shift_labels)\n",
    "            return (loss,)\n",
    "\n",
    "        CausalLMOutput = namedtuple(\"CausalLMOutput\", [\"logits\"])\n",
    "        return CausalLMOutput(logits=lm_logits)\n",
    "\n",
    "    @classmethod\n",
    "    def from_pretrained(cls, pretrained_model_name, device=None, dtype=None, **kwargs):\n",
    "        config = load_config_hf(pretrained_model_name)\n",
    "        model = cls(**config, device=device, dtype=dtype, **kwargs)\n",
    "        model.load_state_dict(load_state_dict_hf(pretrained_model_name, device=device, dtype=dtype))\n",
    "        return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "4fe72f34",
   "metadata": {},
   "outputs": [],
   "source": [
    "# !wget https://huggingface.co/state-spaces/mamba-130m/raw/main/config.json -O config-130m.json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "eac72d2b",
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "with open('config-130m.json') as fopen:\n",
    "    config = json.load(fopen)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "b85b48be",
   "metadata": {},
   "outputs": [],
   "source": [
    "model = MambaLMHeadModel(**{**config, 'vocab_size': 32000}, dtype = torch.bfloat16)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "0a279064",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "32000"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.config.vocab_size"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "e8beea8d",
   "metadata": {},
   "outputs": [],
   "source": [
    "from streaming import LocalDataset\n",
    "import numpy as np\n",
    "from streaming.base.format.mds.encodings import Encoding, _encodings\n",
    "\n",
    "class UInt16(Encoding):\n",
    "    \"\"\"MDS encoding that round-trips uint16 numpy arrays as raw bytes.\"\"\"\n",
    "\n",
    "    def encode(self, obj) -> bytes:\n",
    "        return obj.tobytes()\n",
    "\n",
    "    def decode(self, data: bytes):\n",
    "        return np.frombuffer(data, dtype=np.uint16)\n",
    "\n",
    "# Register the codec so MDS shards can declare columns as 'uint16'.\n",
    "_encodings['uint16'] = UInt16"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "74c0f1fd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# !git lfs clone https://huggingface.co/datasets/malaysia-ai/mosaic-instructions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "88f14bc3",
   "metadata": {},
   "outputs": [],
   "source": [
    "class DatasetFixed(torch.utils.data.Dataset):\n",
    "    def __init__(self, local):\n",
    "        self.dataset = LocalDataset(local=local)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        print(idx)\n",
    "        data = self.dataset[idx]\n",
    "        data['labels'] = data['input_ids'].copy()\n",
    "\n",
    "        data.pop('token_type_ids', None)\n",
    "        for k in data.keys():\n",
    "            data[k] = data[k].astype(np.int64)\n",
    "        return data\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.dataset)\n",
    "\n",
    "train_dataset = DatasetFixed(local='mosaic-instructions')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "7a777aae",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using the `WANDB_DISABLED` environment variable is deprecated and will be removed in v5. Use the --report_to flag to control the integrations used for logging result (for instance --report_to none).\n"
     ]
    }
   ],
   "source": [
    "from transformers import TrainingArguments, Trainer, default_data_collator\n",
    "\n",
    "output_dir = 'test-130m'\n",
    "deepspeed = {\n",
    "    \"comms_logger\": {\n",
    "        \"enabled\": True,\n",
    "        \"debug\": True\n",
    "    },\n",
    "    \"fp16\": {\n",
    "        \"enabled\": \"auto\",\n",
    "        \"loss_scale\": 0,\n",
    "        \"loss_scale_window\": 1000,\n",
    "        \"initial_scale_power\": 16,\n",
    "        \"hysteresis\": 2,\n",
    "        \"min_loss_scale\": 1\n",
    "    },\n",
    "\n",
    "    \"bf16\": {\n",
    "        \"enabled\": \"auto\"\n",
    "    },\n",
    "\n",
    "    \"optimizer\": {\n",
    "        \"type\": \"AdamW\",\n",
    "        \"params\": {\n",
    "            \"lr\": \"auto\",\n",
    "            \"betas\": \"auto\",\n",
    "            \"eps\": \"auto\",\n",
    "            \"weight_decay\": \"auto\"\n",
    "        }\n",
    "    },\n",
    "\n",
    "    \"scheduler\": {\n",
    "        \"type\": \"WarmupDecayLR\",\n",
    "        \"params\": {\n",
    "            \"warmup_min_lr\": \"auto\",\n",
    "            \"warmup_max_lr\": \"auto\",\n",
    "            \"warmup_num_steps\": \"auto\",\n",
    "            \"total_num_steps\": \"auto\",\n",
    "        }\n",
    "    },\n",
    "\n",
    "    \"zero_optimization\": {\n",
    "        \"stage\": 3,\n",
    "        \"offload_optimizer\": {\n",
    "            \"device\": \"cpu\",\n",
    "            \"pin_memory\": True\n",
    "        },\n",
    "        \"offload_param\": {\n",
    "            \"device\": \"cpu\",\n",
    "            \"pin_memory\": True\n",
    "        },\n",
    "        \"overlap_comm\": True,\n",
    "        \"contiguous_gradients\": True,\n",
    "        \"sub_group_size\": 1e8,\n",
    "        \"reduce_bucket_size\": \"auto\",\n",
    "        \"stage3_prefetch_bucket_size\": \"auto\",\n",
    "        \"stage3_param_persistence_threshold\": \"auto\",\n",
    "        \"stage3_max_live_parameters\": 1e8,\n",
    "        \"stage3_max_reuse_distance\": 1e8,\n",
    "        \"stage3_gather_16bit_weights_on_model_save\": True\n",
    "    },\n",
    "\n",
    "    \"gradient_accumulation_steps\": \"auto\",\n",
    "    \"gradient_clipping\": \"auto\",\n",
    "    \"steps_per_print\": 2000,\n",
    "    \"train_batch_size\": \"auto\",\n",
    "    \"train_micro_batch_size_per_gpu\": \"auto\",\n",
    "    \"wall_clock_breakdown\": False\n",
    "}\n",
    "\n",
    "training_args = TrainingArguments(\n",
    "    output_dir,\n",
    "    per_device_train_batch_size=2,\n",
    "    gradient_accumulation_steps=1,\n",
    "    logging_steps=1,\n",
    "    save_strategy='steps',\n",
    "    save_steps=100,\n",
    "    num_train_epochs=3,\n",
    "    learning_rate=1e-5,\n",
    "    weight_decay=1e-1,\n",
    "    warmup_steps=1000,\n",
    "    bf16=True,\n",
    "    gradient_checkpointing=False,\n",
    "    deepspeed=deepspeed,\n",
    "    save_total_limit=5,\n",
    "    log_level='debug',\n",
    "    max_steps=10,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "70b88358",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "max_steps is given, it will override any value given in num_train_epochs\n",
      "Using auto half precision backend\n"
     ]
    }
   ],
   "source": [
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=training_args,\n",
    "    train_dataset=train_dataset,\n",
    "    data_collator=default_data_collator,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "35371db4",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'args': TrainingArguments(\n",
       " _n_gpu=1,\n",
       " adafactor=False,\n",
       " adam_beta1=0.9,\n",
       " adam_beta2=0.999,\n",
       " adam_epsilon=1e-08,\n",
       " auto_find_batch_size=False,\n",
       " bf16=True,\n",
       " bf16_full_eval=False,\n",
       " data_seed=None,\n",
       " dataloader_drop_last=False,\n",
       " dataloader_num_workers=0,\n",
       " dataloader_pin_memory=True,\n",
       " ddp_backend=None,\n",
       " ddp_broadcast_buffers=None,\n",
       " ddp_bucket_cap_mb=None,\n",
       " ddp_find_unused_parameters=None,\n",
       " ddp_timeout=1800,\n",
       " debug=[],\n",
       " deepspeed={'comms_logger': {'enabled': True, 'debug': True}, 'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'optimizer': {'type': 'AdamW', 'params': {'lr': 'auto', 'betas': 'auto', 'eps': 'auto', 'weight_decay': 'auto'}}, 'scheduler': {'type': 'WarmupDecayLR', 'params': {'warmup_min_lr': 'auto', 'warmup_max_lr': 'auto', 'warmup_num_steps': 'auto', 'total_num_steps': 'auto'}}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'cpu', 'pin_memory': True}, 'offload_param': {'device': 'cpu', 'pin_memory': True}, 'overlap_comm': True, 'contiguous_gradients': True, 'sub_group_size': 100000000.0, 'reduce_bucket_size': 'auto', 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 100000000.0, 'stage3_max_reuse_distance': 100000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False},\n",
       " disable_tqdm=False,\n",
       " dispatch_batches=None,\n",
       " do_eval=False,\n",
       " do_predict=False,\n",
       " do_train=False,\n",
       " eval_accumulation_steps=None,\n",
       " eval_delay=0,\n",
       " eval_steps=None,\n",
       " evaluation_strategy=no,\n",
       " fp16=False,\n",
       " fp16_backend=auto,\n",
       " fp16_full_eval=False,\n",
       " fp16_opt_level=O1,\n",
       " fsdp=[],\n",
       " fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False},\n",
       " fsdp_min_num_params=0,\n",
       " fsdp_transformer_layer_cls_to_wrap=None,\n",
       " full_determinism=False,\n",
       " gradient_accumulation_steps=1,\n",
       " gradient_checkpointing=False,\n",
       " gradient_checkpointing_kwargs=None,\n",
       " greater_is_better=None,\n",
       " group_by_length=False,\n",
       " half_precision_backend=auto,\n",
       " hub_always_push=False,\n",
       " hub_model_id=None,\n",
       " hub_private_repo=False,\n",
       " hub_strategy=every_save,\n",
       " hub_token=<HUB_TOKEN>,\n",
       " ignore_data_skip=False,\n",
       " include_inputs_for_metrics=False,\n",
       " include_tokens_per_second=False,\n",
       " jit_mode_eval=False,\n",
       " label_names=None,\n",
       " label_smoothing_factor=0.0,\n",
       " learning_rate=1e-05,\n",
       " length_column_name=length,\n",
       " load_best_model_at_end=False,\n",
       " local_rank=0,\n",
       " log_level=debug,\n",
       " log_level_replica=warning,\n",
       " log_on_each_node=True,\n",
       " logging_dir=test-130m/runs/Dec05_08-09-29_jupyter-0,\n",
       " logging_first_step=False,\n",
       " logging_nan_inf_filter=True,\n",
       " logging_steps=1,\n",
       " logging_strategy=steps,\n",
       " lr_scheduler_type=linear,\n",
       " max_grad_norm=1.0,\n",
       " max_steps=10,\n",
       " metric_for_best_model=None,\n",
       " mp_parameters=,\n",
       " neftune_noise_alpha=None,\n",
       " no_cuda=False,\n",
       " num_train_epochs=3,\n",
       " optim=adamw_torch,\n",
       " optim_args=None,\n",
       " output_dir=test-130m,\n",
       " overwrite_output_dir=False,\n",
       " past_index=-1,\n",
       " per_device_eval_batch_size=8,\n",
       " per_device_train_batch_size=2,\n",
       " prediction_loss_only=False,\n",
       " push_to_hub=False,\n",
       " push_to_hub_model_id=None,\n",
       " push_to_hub_organization=None,\n",
       " push_to_hub_token=<PUSH_TO_HUB_TOKEN>,\n",
       " ray_scope=last,\n",
       " remove_unused_columns=True,\n",
       " report_to=['tensorboard'],\n",
       " resume_from_checkpoint=None,\n",
       " run_name=test-130m,\n",
       " save_on_each_node=False,\n",
       " save_safetensors=True,\n",
       " save_steps=100,\n",
       " save_strategy=steps,\n",
       " save_total_limit=5,\n",
       " seed=42,\n",
       " skip_memory_metrics=True,\n",
       " split_batches=False,\n",
       " tf32=None,\n",
       " torch_compile=False,\n",
       " torch_compile_backend=None,\n",
       " torch_compile_mode=None,\n",
       " torchdynamo=None,\n",
       " tpu_metrics_debug=False,\n",
       " tpu_num_cores=None,\n",
       " use_cpu=False,\n",
       " use_ipex=False,\n",
       " use_legacy_prediction_loop=False,\n",
       " use_mps_device=False,\n",
       " warmup_ratio=0.0,\n",
       " warmup_steps=1000,\n",
       " weight_decay=0.1,\n",
       " ),\n",
       " 'hp_name': None,\n",
       " 'deepspeed': None,\n",
       " 'is_in_train': False,\n",
       " 'accelerator': <accelerate.accelerator.Accelerator at 0x7f760b839cc0>,\n",
       " 'is_deepspeed_enabled': True,\n",
       " 'is_fsdp_enabled': False,\n",
       " '_memory_tracker': <transformers.trainer_utils.TrainerMemoryTracker at 0x7f74b8310dc0>,\n",
       " 'model_init': None,\n",
       " 'is_model_parallel': False,\n",
       " 'fsdp': None,\n",
       " 'place_model_on_device': False,\n",
       " 'data_collator': <function transformers.data.data_collator.default_data_collator(features: List[transformers.data.data_collator.InputDataClass], return_tensors='pt') -> Dict[str, Any]>,\n",
       " 'train_dataset': <__main__.DatasetFixed at 0x7f74b83ef100>,\n",
       " 'eval_dataset': None,\n",
       " 'tokenizer': None,\n",
       " 'model_wrapped': MambaLMHeadModel(\n",
       "   (backbone): MixerModel(\n",
       "     (embedding): Embedding(32000, 768)\n",
       "     (layers): ModuleList(\n",
       "       (0-23): 24 x Block(\n",
       "         (mixer): Mamba(\n",
       "           (in_proj): Linear(in_features=768, out_features=3072, bias=False)\n",
       "           (conv1d): Conv1d(1536, 1536, kernel_size=(4,), stride=(1,), padding=(3,), groups=1536)\n",
       "           (act): SiLU()\n",
       "           (x_proj): Linear(in_features=1536, out_features=80, bias=False)\n",
       "           (dt_proj): Linear(in_features=48, out_features=1536, bias=True)\n",
       "           (out_proj): Linear(in_features=1536, out_features=768, bias=False)\n",
       "         )\n",
       "         (norm): RMSNorm()\n",
       "       )\n",
       "     )\n",
       "     (norm_f): RMSNorm()\n",
       "   )\n",
       "   (lm_head): Linear(in_features=768, out_features=32000, bias=False)\n",
       " ),\n",
       " 'model': MambaLMHeadModel(\n",
       "   (backbone): MixerModel(\n",
       "     (embedding): Embedding(32000, 768)\n",
       "     (layers): ModuleList(\n",
       "       (0-23): 24 x Block(\n",
       "         (mixer): Mamba(\n",
       "           (in_proj): Linear(in_features=768, out_features=3072, bias=False)\n",
       "           (conv1d): Conv1d(1536, 1536, kernel_size=(4,), stride=(1,), padding=(3,), groups=1536)\n",
       "           (act): SiLU()\n",
       "           (x_proj): Linear(in_features=1536, out_features=80, bias=False)\n",
       "           (dt_proj): Linear(in_features=48, out_features=1536, bias=True)\n",
       "           (out_proj): Linear(in_features=1536, out_features=768, bias=False)\n",
       "         )\n",
       "         (norm): RMSNorm()\n",
       "       )\n",
       "     )\n",
       "     (norm_f): RMSNorm()\n",
       "   )\n",
       "   (lm_head): Linear(in_features=768, out_features=32000, bias=False)\n",
       " ),\n",
       " 'neftune_noise_alpha': None,\n",
       " 'compute_metrics': None,\n",
       " 'preprocess_logits_for_metrics': None,\n",
       " 'optimizer': None,\n",
       " 'lr_scheduler': None,\n",
       " 'callback_handler': <transformers.trainer_callback.CallbackHandler at 0x7f74b823ad70>,\n",
       " '_loggers_initialized': False,\n",
       " 'hub_model_id': None,\n",
       " '_signature_columns': None,\n",
       " 'use_apex': False,\n",
       " 'use_cpu_amp': False,\n",
       " 'label_smoother': None,\n",
       " 'state': TrainerState(epoch=None, global_step=0, max_steps=0, logging_steps=500, eval_steps=500, save_steps=500, num_train_epochs=0, total_flos=0, log_history=[], best_metric=None, best_model_checkpoint=None, is_local_process_zero=True, is_world_process_zero=True, is_hyper_param_search=False, trial_name=None, trial_params=None),\n",
       " 'control': TrainerControl(should_training_stop=False, should_epoch_stop=False, should_save=False, should_evaluate=False, should_log=False),\n",
       " 'current_flos': 0,\n",
       " 'hp_search_backend': None,\n",
       " 'use_tune_checkpoints': False,\n",
       " 'label_names': ['labels'],\n",
       " 'can_return_loss': False,\n",
       " '_train_batch_size': 2,\n",
       " '_created_lr_scheduler': False}"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the Trainer's internal state (vars(x) is equivalent to x.__dict__)\n",
    "vars(trainer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "bc1dcf2d",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Currently training with a batch size of: 2\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2023-12-05 08:09:30,008] [INFO] [real_accelerator.py:161:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n",
      "[2023-12-05 08:09:32,214] [INFO] [logging.py:96:log_dist] [Rank -1] DeepSpeed info: version=0.12.4, git-hash=unknown, git-branch=unknown\n",
      "[2023-12-05 08:09:32,215] [INFO] [comm.py:637:init_distributed] cdb=None\n",
      "[2023-12-05 08:09:32,215] [INFO] [comm.py:652:init_distributed] Not using the DeepSpeed or dist launchers, attempting to detect MPI environment...\n",
      "[2023-12-05 08:09:32,603] [INFO] [comm.py:702:mpi_discovery] Discovered MPI settings of world_rank=0, local_rank=0, world_size=1, master_addr=10.208.0.238, master_port=29500\n",
      "[2023-12-05 08:09:32,604] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl\n",
      "[2023-12-05 08:09:34,053] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using /home/ubuntu/.cache/torch_extensions/py310_cu118 as PyTorch extensions root...\n",
      "Detected CUDA files, patching ldflags\n",
      "Emitting ninja build file /home/ubuntu/.cache/torch_extensions/py310_cu118/cpu_adam/build.ninja...\n",
      "Building extension module cpu_adam...\n",
      "Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N)\n",
      "Loading extension module cpu_adam...\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ninja: no work to do.\n",
      "Time to load cpu_adam op: 2.297410726547241 seconds\n",
      "[2023-12-05 08:09:38,727] [INFO] [logging.py:96:log_dist] [Rank 0] Using DeepSpeed Optimizer param name adamw as basic optimizer\n",
      "[2023-12-05 08:09:38,727] [INFO] [logging.py:96:log_dist] [Rank 0] Removing param_group that has no 'params' in the basic Optimizer\n",
      "[2023-12-05 08:09:38,735] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Basic Optimizer = DeepSpeedCPUAdam\n",
      "[2023-12-05 08:09:38,735] [INFO] [utils.py:56:is_zero_supported_optimizer] Checking ZeRO support for optimizer=DeepSpeedCPUAdam type=<class 'deepspeed.ops.adam.cpu_adam.DeepSpeedCPUAdam'>\n",
      "[2023-12-05 08:09:38,736] [INFO] [logging.py:96:log_dist] [Rank 0] Creating fp16 ZeRO stage 3 optimizer, MiCS is enabled False, Hierarchical params gather False\n",
      "[2023-12-05 08:09:38,736] [INFO] [logging.py:96:log_dist] [Rank 0] Creating torch.bfloat16 ZeRO stage 3 optimizer\n",
      "[2023-12-05 08:09:38,856] [INFO] [utils.py:795:see_memory_usage] Stage 3 initialize beginning\n",
      "[2023-12-05 08:09:38,857] [INFO] [utils.py:796:see_memory_usage] MA 0.21 GB         Max_MA 0.21 GB         CA 0.23 GB         Max_CA 0 GB \n",
      "[2023-12-05 08:09:38,858] [INFO] [utils.py:803:see_memory_usage] CPU Virtual Memory:  used = 28.04 GB, percent = 13.0%\n",
      "[2023-12-05 08:09:38,859] [INFO] [stage3.py:127:__init__] Reduce bucket size 589824\n",
      "[2023-12-05 08:09:38,860] [INFO] [stage3.py:128:__init__] Prefetch bucket size 530841\n",
      "[2023-12-05 08:09:38,968] [INFO] [utils.py:795:see_memory_usage] DeepSpeedZeRoOffload initialize [begin]\n",
      "[2023-12-05 08:09:38,969] [INFO] [utils.py:796:see_memory_usage] MA 0.21 GB         Max_MA 0.21 GB         CA 0.23 GB         Max_CA 0 GB \n",
      "[2023-12-05 08:09:38,970] [INFO] [utils.py:803:see_memory_usage] CPU Virtual Memory:  used = 28.04 GB, percent = 13.0%\n",
      "Parameter Offload: Total persistent parameters: 277248 in 121 params\n",
      "[2023-12-05 08:09:39,374] [INFO] [utils.py:795:see_memory_usage] DeepSpeedZeRoOffload initialize [end]\n",
      "[2023-12-05 08:09:39,375] [INFO] [utils.py:796:see_memory_usage] MA 0.0 GB         Max_MA 0.21 GB         CA 0.23 GB         Max_CA 0 GB \n",
      "[2023-12-05 08:09:39,376] [INFO] [utils.py:803:see_memory_usage] CPU Virtual Memory:  used = 28.45 GB, percent = 13.2%\n",
      "[2023-12-05 08:09:39,509] [INFO] [utils.py:795:see_memory_usage] Before creating fp16 partitions\n",
      "[2023-12-05 08:09:39,510] [INFO] [utils.py:796:see_memory_usage] MA 0.0 GB         Max_MA 0.0 GB         CA 0.23 GB         Max_CA 0 GB \n",
      "[2023-12-05 08:09:39,511] [INFO] [utils.py:803:see_memory_usage] CPU Virtual Memory:  used = 28.46 GB, percent = 13.2%\n",
      "[2023-12-05 08:09:40,120] [INFO] [utils.py:795:see_memory_usage] After creating fp16 partitions: 2\n",
      "[2023-12-05 08:09:40,122] [INFO] [utils.py:796:see_memory_usage] MA 0.0 GB         Max_MA 0.0 GB         CA 0.23 GB         Max_CA 0 GB \n",
      "[2023-12-05 08:09:40,123] [INFO] [utils.py:803:see_memory_usage] CPU Virtual Memory:  used = 28.74 GB, percent = 13.3%\n",
      "[2023-12-05 08:09:40,239] [INFO] [utils.py:795:see_memory_usage] Before creating fp32 partitions\n",
      "[2023-12-05 08:09:40,240] [INFO] [utils.py:796:see_memory_usage] MA 0.0 GB         Max_MA 0.0 GB         CA 0.23 GB         Max_CA 0 GB \n",
      "[2023-12-05 08:09:40,241] [INFO] [utils.py:803:see_memory_usage] CPU Virtual Memory:  used = 28.74 GB, percent = 13.3%\n",
      "[2023-12-05 08:09:40,401] [INFO] [utils.py:795:see_memory_usage] After creating fp32 partitions\n",
      "[2023-12-05 08:09:40,402] [INFO] [utils.py:796:see_memory_usage] MA 0.0 GB         Max_MA 0.0 GB         CA 0.23 GB         Max_CA 0 GB \n",
      "[2023-12-05 08:09:40,403] [INFO] [utils.py:803:see_memory_usage] CPU Virtual Memory:  used = 29.16 GB, percent = 13.5%\n",
      "[2023-12-05 08:09:40,520] [INFO] [utils.py:795:see_memory_usage] Before initializing optimizer states\n",
      "[2023-12-05 08:09:40,521] [INFO] [utils.py:796:see_memory_usage] MA 0.0 GB         Max_MA 0.0 GB         CA 0.23 GB         Max_CA 0 GB \n",
      "[2023-12-05 08:09:40,522] [INFO] [utils.py:803:see_memory_usage] CPU Virtual Memory:  used = 29.16 GB, percent = 13.5%\n",
      "[2023-12-05 08:09:41,001] [INFO] [utils.py:795:see_memory_usage] After initializing optimizer states\n",
      "[2023-12-05 08:09:41,002] [INFO] [utils.py:796:see_memory_usage] MA 0.0 GB         Max_MA 0.0 GB         CA 0.23 GB         Max_CA 0 GB \n",
      "[2023-12-05 08:09:41,003] [INFO] [utils.py:803:see_memory_usage] CPU Virtual Memory:  used = 30.51 GB, percent = 14.1%\n",
      "[2023-12-05 08:09:41,004] [INFO] [stage3.py:479:_setup_for_real_optimizer] optimizer state initialized\n",
      "[2023-12-05 08:09:41,349] [INFO] [utils.py:795:see_memory_usage] After initializing ZeRO optimizer\n",
      "[2023-12-05 08:09:41,350] [INFO] [utils.py:796:see_memory_usage] MA 0.0 GB         Max_MA 0.09 GB         CA 0.28 GB         Max_CA 0 GB \n",
      "[2023-12-05 08:09:41,351] [INFO] [utils.py:803:see_memory_usage] CPU Virtual Memory:  used = 30.77 GB, percent = 14.2%\n",
      "[2023-12-05 08:09:41,352] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Final Optimizer = adamw\n",
      "[2023-12-05 08:09:41,352] [WARNING] [lr_schedules.py:759:__init__] total_num_steps 10 is less than warmup_num_steps 1000\n",
      "[2023-12-05 08:09:41,353] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed using configured LR scheduler = WarmupDecayLR\n",
      "[2023-12-05 08:09:41,353] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed LR Scheduler = <deepspeed.runtime.lr_schedules.WarmupDecayLR object at 0x7f752027f0a0>\n",
      "[2023-12-05 08:09:41,354] [INFO] [logging.py:96:log_dist] [Rank 0] step=0, skipped=0, lr=[1e-05], mom=[[0.9, 0.999]]\n",
      "[2023-12-05 08:09:41,354] [INFO] [config.py:979:print] DeepSpeedEngine configuration:\n",
      "[2023-12-05 08:09:41,355] [INFO] [config.py:983:print]   activation_checkpointing_config  {\n",
      "    \"partition_activations\": false, \n",
      "    \"contiguous_memory_optimization\": false, \n",
      "    \"cpu_checkpointing\": false, \n",
      "    \"number_checkpoints\": null, \n",
      "    \"synchronize_checkpoint_boundary\": false, \n",
      "    \"profile\": false\n",
      "}\n",
      "[2023-12-05 08:09:41,355] [INFO] [config.py:983:print]   aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True}\n",
      "[2023-12-05 08:09:41,356] [INFO] [config.py:983:print]   amp_enabled .................. False\n",
      "[2023-12-05 08:09:41,356] [INFO] [config.py:983:print]   amp_params ................... False\n",
      "[2023-12-05 08:09:41,357] [INFO] [config.py:983:print]   autotuning_config ............ {\n",
      "    \"enabled\": false, \n",
      "    \"start_step\": null, \n",
      "    \"end_step\": null, \n",
      "    \"metric_path\": null, \n",
      "    \"arg_mappings\": null, \n",
      "    \"metric\": \"throughput\", \n",
      "    \"model_info\": null, \n",
      "    \"results_dir\": \"autotuning_results\", \n",
      "    \"exps_dir\": \"autotuning_exps\", \n",
      "    \"overwrite\": true, \n",
      "    \"fast\": true, \n",
      "    \"start_profile_step\": 3, \n",
      "    \"end_profile_step\": 5, \n",
      "    \"tuner_type\": \"gridsearch\", \n",
      "    \"tuner_early_stopping\": 5, \n",
      "    \"tuner_num_trials\": 50, \n",
      "    \"model_info_path\": null, \n",
      "    \"mp_size\": 1, \n",
      "    \"max_train_batch_size\": null, \n",
      "    \"min_train_batch_size\": 1, \n",
      "    \"max_train_micro_batch_size_per_gpu\": 1.024000e+03, \n",
      "    \"min_train_micro_batch_size_per_gpu\": 1, \n",
      "    \"num_tuning_micro_batch_sizes\": 3\n",
      "}\n",
      "[2023-12-05 08:09:41,357] [INFO] [config.py:983:print]   bfloat16_enabled ............. True\n",
      "[2023-12-05 08:09:41,357] [INFO] [config.py:983:print]   checkpoint_parallel_write_pipeline  False\n",
      "[2023-12-05 08:09:41,358] [INFO] [config.py:983:print]   checkpoint_tag_validation_enabled  True\n",
      "[2023-12-05 08:09:41,358] [INFO] [config.py:983:print]   checkpoint_tag_validation_fail  False\n",
      "[2023-12-05 08:09:41,358] [INFO] [config.py:983:print]   comms_config ................. <deepspeed.comm.config.DeepSpeedCommsConfig object at 0x7f74231df010>\n",
      "[2023-12-05 08:09:41,359] [INFO] [config.py:983:print]   communication_data_type ...... None\n",
      "[2023-12-05 08:09:41,359] [INFO] [config.py:983:print]   compression_config ........... {'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2023-12-05 08:09:41,360] [INFO] [config.py:983:print]   curriculum_enabled_legacy .... False\n",
      "[2023-12-05 08:09:41,360] [INFO] [config.py:983:print]   curriculum_params_legacy ..... False\n",
      "[2023-12-05 08:09:41,360] [INFO] [config.py:983:print]   data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}}\n",
      "[2023-12-05 08:09:41,361] [INFO] [config.py:983:print]   data_efficiency_enabled ...... False\n",
      "[2023-12-05 08:09:41,361] [INFO] [config.py:983:print]   dataloader_drop_last ......... False\n",
      "[2023-12-05 08:09:41,363] [INFO] [config.py:983:print]   disable_allgather ............ False\n",
      "[2023-12-05 08:09:41,363] [INFO] [config.py:983:print]   dump_state ................... False\n",
      "[2023-12-05 08:09:41,364] [INFO] [config.py:983:print]   dynamic_loss_scale_args ...... None\n",
      "[2023-12-05 08:09:41,364] [INFO] [config.py:983:print]   eigenvalue_enabled ........... False\n",
      "[2023-12-05 08:09:41,364] [INFO] [config.py:983:print]   eigenvalue_gas_boundary_resolution  1\n",
      "[2023-12-05 08:09:41,365] [INFO] [config.py:983:print]   eigenvalue_layer_name ........ bert.encoder.layer\n",
      "[2023-12-05 08:09:41,365] [INFO] [config.py:983:print]   eigenvalue_layer_num ......... 0\n",
      "[2023-12-05 08:09:41,365] [INFO] [config.py:983:print]   eigenvalue_max_iter .......... 100\n",
      "[2023-12-05 08:09:41,366] [INFO] [config.py:983:print]   eigenvalue_stability ......... 1e-06\n",
      "[2023-12-05 08:09:41,367] [INFO] [config.py:983:print]   eigenvalue_tol ............... 0.01\n",
      "[2023-12-05 08:09:41,367] [INFO] [config.py:983:print]   eigenvalue_verbose ........... False\n",
      "[2023-12-05 08:09:41,368] [INFO] [config.py:983:print]   elasticity_enabled ........... False\n",
      "[2023-12-05 08:09:41,368] [INFO] [config.py:983:print]   flops_profiler_config ........ {\n",
      "    \"enabled\": false, \n",
      "    \"recompute_fwd_factor\": 0.0, \n",
      "    \"profile_step\": 1, \n",
      "    \"module_depth\": -1, \n",
      "    \"top_modules\": 1, \n",
      "    \"detailed\": true, \n",
      "    \"output_file\": null\n",
      "}\n",
      "[2023-12-05 08:09:41,368] [INFO] [config.py:983:print]   fp16_auto_cast ............... None\n",
      "[2023-12-05 08:09:41,369] [INFO] [config.py:983:print]   fp16_enabled ................. False\n",
      "[2023-12-05 08:09:41,369] [INFO] [config.py:983:print]   fp16_master_weights_and_gradients  False\n",
      "[2023-12-05 08:09:41,369] [INFO] [config.py:983:print]   global_rank .................. 0\n",
      "[2023-12-05 08:09:41,370] [INFO] [config.py:983:print]   grad_accum_dtype ............. None\n",
      "[2023-12-05 08:09:41,370] [INFO] [config.py:983:print]   gradient_accumulation_steps .. 1\n",
      "[2023-12-05 08:09:41,370] [INFO] [config.py:983:print]   gradient_clipping ............ 1.0\n",
      "[2023-12-05 08:09:41,370] [INFO] [config.py:983:print]   gradient_predivide_factor .... 1.0\n",
      "[2023-12-05 08:09:41,371] [INFO] [config.py:983:print]   hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8\n",
      "[2023-12-05 08:09:41,371] [INFO] [config.py:983:print]   initial_dynamic_scale ........ 1\n",
      "[2023-12-05 08:09:41,371] [INFO] [config.py:983:print]   load_universal_checkpoint .... False\n",
      "[2023-12-05 08:09:41,372] [INFO] [config.py:983:print]   loss_scale ................... 1.0\n",
      "[2023-12-05 08:09:41,372] [INFO] [config.py:983:print]   memory_breakdown ............. False\n",
      "[2023-12-05 08:09:41,372] [INFO] [config.py:983:print]   mics_hierarchial_params_gather  False\n",
      "[2023-12-05 08:09:41,373] [INFO] [config.py:983:print]   mics_shard_size .............. -1\n",
      "[2023-12-05 08:09:41,373] [INFO] [config.py:983:print]   monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') enabled=False\n",
      "[2023-12-05 08:09:41,373] [INFO] [config.py:983:print]   nebula_config ................ {\n",
      "    \"enabled\": false, \n",
      "    \"persistent_storage_path\": null, \n",
      "    \"persistent_time_interval\": 100, \n",
      "    \"num_of_version_in_retention\": 2, \n",
      "    \"enable_nebula_load\": true, \n",
      "    \"load_path\": null\n",
      "}\n",
      "[2023-12-05 08:09:41,374] [INFO] [config.py:983:print]   optimizer_legacy_fusion ...... False\n",
      "[2023-12-05 08:09:41,374] [INFO] [config.py:983:print]   optimizer_name ............... adamw\n",
      "[2023-12-05 08:09:41,374] [INFO] [config.py:983:print]   optimizer_params ............. {'lr': 1e-05, 'betas': [0.9, 0.999], 'eps': 1e-08, 'weight_decay': 0.1}\n",
      "[2023-12-05 08:09:41,375] [INFO] [config.py:983:print]   pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0, 'pipe_partitioned': True, 'grad_partitioned': True}\n",
      "[2023-12-05 08:09:41,375] [INFO] [config.py:983:print]   pld_enabled .................. False\n",
      "[2023-12-05 08:09:41,375] [INFO] [config.py:983:print]   pld_params ................... False\n",
      "[2023-12-05 08:09:41,376] [INFO] [config.py:983:print]   prescale_gradients ........... False\n",
      "[2023-12-05 08:09:41,376] [INFO] [config.py:983:print]   scheduler_name ............... WarmupDecayLR\n",
      "[2023-12-05 08:09:41,376] [INFO] [config.py:983:print]   scheduler_params ............. {'warmup_min_lr': 0, 'warmup_max_lr': 1e-05, 'warmup_num_steps': 1000, 'total_num_steps': 10}\n",
      "[2023-12-05 08:09:41,377] [INFO] [config.py:983:print]   seq_parallel_communication_data_type  torch.float32\n",
      "[2023-12-05 08:09:41,377] [INFO] [config.py:983:print]   sparse_attention ............. None\n",
      "[2023-12-05 08:09:41,377] [INFO] [config.py:983:print]   sparse_gradients_enabled ..... False\n",
      "[2023-12-05 08:09:41,377] [INFO] [config.py:983:print]   steps_per_print .............. inf\n",
      "[2023-12-05 08:09:41,378] [INFO] [config.py:983:print]   train_batch_size ............. 2\n",
      "[2023-12-05 08:09:41,378] [INFO] [config.py:983:print]   train_micro_batch_size_per_gpu  2\n",
      "[2023-12-05 08:09:41,378] [INFO] [config.py:983:print]   use_data_before_expert_parallel_  False\n",
      "[2023-12-05 08:09:41,379] [INFO] [config.py:983:print]   use_node_local_storage ....... False\n",
      "[2023-12-05 08:09:41,379] [INFO] [config.py:983:print]   wall_clock_breakdown ......... False\n",
      "[2023-12-05 08:09:41,379] [INFO] [config.py:983:print]   weight_quantization_config ... None\n",
      "[2023-12-05 08:09:41,380] [INFO] [config.py:983:print]   world_size ................... 1\n",
      "[2023-12-05 08:09:41,380] [INFO] [config.py:983:print]   zero_allow_untested_optimizer  False\n",
      "[2023-12-05 08:09:41,380] [INFO] [config.py:983:print]   zero_config .................. stage=3 contiguous_gradients=True reduce_scatter=True reduce_bucket_size=589824 use_multi_rank_bucket_allreduce=True allgather_partitions=True allgather_bucket_size=500,000,000 overlap_comm=True load_from_fp32_weights=True elastic_checkpoint=False offload_param=DeepSpeedZeroOffloadParamConfig(device='cpu', nvme_path=None, buffer_count=5, buffer_size=100,000,000, max_in_cpu=1,000,000,000, pin_memory=True) offload_optimizer=DeepSpeedZeroOffloadOptimizerConfig(device='cpu', nvme_path=None, buffer_count=4, pin_memory=True, pipeline=False, pipeline_read=False, pipeline_write=False, fast_init=False, ratio=1.0) sub_group_size=100000000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=530841 param_persistence_threshold=7680 model_persistence_threshold=sys.maxsize max_live_parameters=100000000 max_reuse_distance=100000000 gather_16bit_weights_on_model_save=True stage3_gather_fp16_weights_on_model_save=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True\n",
      "[2023-12-05 08:09:41,381] [INFO] [config.py:983:print]   zero_enabled ................. True\n",
      "[2023-12-05 08:09:41,381] [INFO] [config.py:983:print]   zero_force_ds_cpu_optimizer .. True\n",
      "[2023-12-05 08:09:41,381] [INFO] [config.py:983:print]   zero_optimization_stage ...... 3\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2023-12-05 08:09:41,382] [INFO] [config.py:969:print_user_config]   json = {\n",
      "    \"comms_logger\": {\n",
      "        \"enabled\": true, \n",
      "        \"debug\": true\n",
      "    }, \n",
      "    \"fp16\": {\n",
      "        \"enabled\": false, \n",
      "        \"loss_scale\": 0, \n",
      "        \"loss_scale_window\": 1000, \n",
      "        \"initial_scale_power\": 16, \n",
      "        \"hysteresis\": 2, \n",
      "        \"min_loss_scale\": 1\n",
      "    }, \n",
      "    \"bf16\": {\n",
      "        \"enabled\": true\n",
      "    }, \n",
      "    \"optimizer\": {\n",
      "        \"type\": \"AdamW\", \n",
      "        \"params\": {\n",
      "            \"lr\": 1e-05, \n",
      "            \"betas\": [0.9, 0.999], \n",
      "            \"eps\": 1e-08, \n",
      "            \"weight_decay\": 0.1\n",
      "        }\n",
      "    }, \n",
      "    \"scheduler\": {\n",
      "        \"type\": \"WarmupDecayLR\", \n",
      "        \"params\": {\n",
      "            \"warmup_min_lr\": 0, \n",
      "            \"warmup_max_lr\": 1e-05, \n",
      "            \"warmup_num_steps\": 1000, \n",
      "            \"total_num_steps\": 10\n",
      "        }\n",
      "    }, \n",
      "    \"zero_optimization\": {\n",
      "        \"stage\": 3, \n",
      "        \"offload_optimizer\": {\n",
      "            \"device\": \"cpu\", \n",
      "            \"pin_memory\": true\n",
      "        }, \n",
      "        \"offload_param\": {\n",
      "            \"device\": \"cpu\", \n",
      "            \"pin_memory\": true\n",
      "        }, \n",
      "        \"overlap_comm\": true, \n",
      "        \"contiguous_gradients\": true, \n",
      "        \"sub_group_size\": 1.000000e+08, \n",
      "        \"reduce_bucket_size\": 5.898240e+05, \n",
      "        \"stage3_prefetch_bucket_size\": 5.308416e+05, \n",
      "        \"stage3_param_persistence_threshold\": 7.680000e+03, \n",
      "        \"stage3_max_live_parameters\": 1.000000e+08, \n",
      "        \"stage3_max_reuse_distance\": 1.000000e+08, \n",
      "        \"stage3_gather_16bit_weights_on_model_save\": true\n",
      "    }, \n",
      "    \"gradient_accumulation_steps\": 1, \n",
      "    \"gradient_clipping\": 1.0, \n",
      "    \"steps_per_print\": inf, \n",
      "    \"train_batch_size\": 2, \n",
      "    \"train_micro_batch_size_per_gpu\": 2, \n",
      "    \"wall_clock_breakdown\": false\n",
      "}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "***** Running training *****\n",
      "  Num examples = 385,224\n",
      "  Num Epochs = 1\n",
      "  Instantaneous batch size per device = 2\n",
      "  Total train batch size (w. parallel, distributed & accumulation) = 2\n",
      "  Gradient Accumulation steps = 1\n",
      "  Total optimization steps = 10\n",
      "  Number of trainable parameters = 115,096,320\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "327342\n",
      "345828\n",
      "318900\n",
      "69472\n",
      "Adam Optimizer #0 is created with AVX2 arithmetic capability.\n",
      "Config: alpha=0.000010, betas=(0.900000, 0.999000), weight_decay=0.100000, adam_w=1\n",
      "tensor(10.5000, device='cuda:0', dtype=torch.bfloat16,\n",
      "       grad_fn=<NllLossBackward0>) tensor([[-0.7578, -0.7734, -0.1855,  ...,  0.9180, -0.0437, -0.3613],\n",
      "        [-0.3848, -0.4629, -0.1719,  ..., -0.2500,  0.8203, -0.8633],\n",
      "        [-0.2910,  0.0214, -0.3926,  ..., -0.1289, -0.2598, -0.4727],\n",
      "        ...,\n",
      "        [-0.6289,  0.2139,  0.2715,  ..., -0.9844,  0.5000,  0.8281],\n",
      "        [ 0.4141, -0.0513,  0.9531,  ...,  0.3223,  0.3164, -0.2578],\n",
      "        [-0.6211,  0.3555, -0.3926,  ...,  0.4922,  0.7969,  0.5273]],\n",
      "       device='cuda:0', dtype=torch.bfloat16, grad_fn=<ViewBackward0>) tensor([ 6845,  5341,  3474,  ...,    11, 15119,  4318], device='cuda:0')\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='10' max='10' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [10/10 00:04, Epoch 0/1]\n",
       "    </div>\n",
       "    <table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       " <tr style=\"text-align: left;\">\n",
       "      <th>Step</th>\n",
       "      <th>Training Loss</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>10.500000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>5</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>6</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>7</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>8</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>10</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table><p>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "109610\n",
      "38573\n",
      "tensor(nan, device='cuda:0', dtype=torch.bfloat16, grad_fn=<NllLossBackward0>) tensor([[nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        ...,\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan]], device='cuda:0',\n",
      "       dtype=torch.bfloat16, grad_fn=<ViewBackward0>) tensor([7081,  313, 8563,  ..., 2228,   15,  436], device='cuda:0')\n",
      "273694\n",
      "218013\n",
      "tensor(nan, device='cuda:0', dtype=torch.bfloat16, grad_fn=<NllLossBackward0>) tensor([[nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        ...,\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan]], device='cuda:0',\n",
      "       dtype=torch.bfloat16, grad_fn=<ViewBackward0>) tensor([201,  66, 201,  ..., 927,  15, 737], device='cuda:0')\n",
      "204334\n",
      "318572\n",
      "tensor(nan, device='cuda:0', dtype=torch.bfloat16, grad_fn=<NllLossBackward0>) tensor([[nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        ...,\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan]], device='cuda:0',\n",
      "       dtype=torch.bfloat16, grad_fn=<ViewBackward0>) tensor([ 9924,  8209,  1457,  ..., 15603,    29,   298], device='cuda:0')\n",
      "104242\n",
      "261743\n",
      "tensor(nan, device='cuda:0', dtype=torch.bfloat16, grad_fn=<NllLossBackward0>) tensor([[nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        ...,\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan]], device='cuda:0',\n",
      "       dtype=torch.bfloat16, grad_fn=<ViewBackward0>) tensor([  344,  4390,  8678,  ...,  1999, 19738,  4748], device='cuda:0')\n",
      "235058\n",
      "178647\n",
      "tensor(nan, device='cuda:0', dtype=torch.bfloat16, grad_fn=<NllLossBackward0>) tensor([[nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        ...,\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan]], device='cuda:0',\n",
      "       dtype=torch.bfloat16, grad_fn=<ViewBackward0>) tensor([   77,   201,    66,  ...,  4833,   521, 23351], device='cuda:0')\n",
      "225275\n",
      "290107\n",
      "tensor(nan, device='cuda:0', dtype=torch.bfloat16, grad_fn=<NllLossBackward0>) tensor([[nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        ...,\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan]], device='cuda:0',\n",
      "       dtype=torch.bfloat16, grad_fn=<ViewBackward0>) tensor([  267, 23724,  1206,  ...,   650, 29570,   628], device='cuda:0')\n",
      "105315\n",
      "358768\n",
      "tensor(nan, device='cuda:0', dtype=torch.bfloat16, grad_fn=<NllLossBackward0>) tensor([[nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        ...,\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan]], device='cuda:0',\n",
      "       dtype=torch.bfloat16, grad_fn=<ViewBackward0>) tensor([2582, 2150,  492,  ...,  709,   17, 1999], device='cuda:0')\n",
      "332421\n",
      "112040\n",
      "tensor(nan, device='cuda:0', dtype=torch.bfloat16, grad_fn=<NllLossBackward0>) tensor([[nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        ...,\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan]], device='cuda:0',\n",
      "       dtype=torch.bfloat16, grad_fn=<ViewBackward0>) tensor([  201,    37, 12861,  ..., 13257,    17,  7103], device='cuda:0')\n",
      "198053\n",
      "378413\n",
      "tensor(nan, device='cuda:0', dtype=torch.bfloat16, grad_fn=<NllLossBackward0>) tensor([[nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        ...,\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan],\n",
      "        [nan, nan, nan,  ..., nan, nan, nan]], device='cuda:0',\n",
      "       dtype=torch.bfloat16, grad_fn=<ViewBackward0>) tensor([ 1122,  8452, 16062,  ...,   201,    51,  7247], device='cuda:0')\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "Training completed. Do not forget to share your model on huggingface.co/models =)\n",
      "\n",
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "TrainOutput(global_step=10, training_loss=1.05, metrics={'train_runtime': 20.2518, 'train_samples_per_second': 0.988, 'train_steps_per_second': 0.494, 'total_flos': 0.0, 'train_loss': 1.05, 'epoch': 0.0})"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "091c7b17",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
