{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "0d467a70",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-03-17T04:22:42.334686Z",
     "start_time": "2024-03-17T04:22:40.628262Z"
    }
   },
   "outputs": [],
   "source": [
    "import torch"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "09b953f5",
   "metadata": {},
   "source": [
    "## basics"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0a1de2b7",
   "metadata": {},
   "source": [
    "- references:\n",
    "    - https://pytorch.org/docs/stable/tensor_attributes.html#torch-dtype"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c84616e1",
   "metadata": {},
   "source": [
    "\n",
    "| **dtype**    | **等价形式**   | **Data type**           | **comment** |\n",
    "|--------------|----------------|-------------------------|-------------|\n",
     "| torch.half   | torch.float16  | 16-bit floating point (IEEE fp16) |             |\n",
     "|              | torch.bfloat16 | 16-bit brain floating point       |             |\n",
     "| torch.float  | torch.float32  | 32-bit floating point             |             |\n",
     "| torch.double | torch.float64  | 64-bit floating point             |             |\n",
     "| torch.short  | torch.int16    | 16-bit integer (signed)           |             |\n",
     "| torch.int    | torch.int32    | 32-bit integer (signed)           |             |\n",
     "| torch.char   | torch.int8     | 8-bit integer (signed)            |             |\n",
     "| torch.long   | torch.int64    | 64-bit integer (signed)           |             |"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "e09f2765",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-03-10T04:50:17.696862Z",
     "start_time": "2024-03-10T04:50:17.691024Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.int64"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.long"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "708dccae",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-03-10T01:51:37.466940Z",
     "start_time": "2024-03-10T01:51:37.455956Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.int32\n",
      "torch.float32\n",
      "torch.int64\n"
     ]
    }
   ],
   "source": [
    "X = [[1, 2], [3, 4]]\n",
    "\n",
    "x1 = torch.IntTensor(X)\n",
    "x2 = torch.Tensor(X)\n",
    "x3 = torch.tensor(X)\n",
    "\n",
    "print(x1.dtype)  # torch.int32\n",
    "print(x2.dtype)  # torch.float32\n",
    "print(x3.dtype)  # torch.int64, torch.tensor() infers the data type automatically."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "9397c08f",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-03-10T01:53:33.954464Z",
     "start_time": "2024-03-10T01:53:33.944749Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "True"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x3.dtype == torch.long"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "558f3b97",
   "metadata": {},
   "source": [
    "## torch_dtype 与 load_in_8bit"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "c72edd1e",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-03-17T04:22:57.522200Z",
     "start_time": "2024-03-17T04:22:57.093864Z"
    }
   },
   "outputs": [],
   "source": [
    "from transformers import AutoModelForCausalLM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "f638acaa",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-02-23T11:38:46.011960Z",
     "start_time": "2024-02-23T11:38:25.360839Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "486.7002410888672\n",
      "torch.float32 transformer.wte.weight torch.Size([50257, 768])\n",
      "torch.float32 transformer.wpe.weight torch.Size([1024, 768])\n",
      "torch.float32 transformer.h.0.ln_1.weight torch.Size([768])\n",
      "torch.float32 transformer.h.0.ln_1.bias torch.Size([768])\n",
      "torch.float32 transformer.h.0.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float32 transformer.h.0.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float32 transformer.h.0.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float32 transformer.h.0.attn.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.0.ln_2.weight torch.Size([768])\n",
      "torch.float32 transformer.h.0.ln_2.bias torch.Size([768])\n",
      "torch.float32 transformer.h.0.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float32 transformer.h.0.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float32 transformer.h.0.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float32 transformer.h.0.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.1.ln_1.weight torch.Size([768])\n",
      "torch.float32 transformer.h.1.ln_1.bias torch.Size([768])\n",
      "torch.float32 transformer.h.1.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float32 transformer.h.1.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float32 transformer.h.1.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float32 transformer.h.1.attn.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.1.ln_2.weight torch.Size([768])\n",
      "torch.float32 transformer.h.1.ln_2.bias torch.Size([768])\n",
      "torch.float32 transformer.h.1.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float32 transformer.h.1.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float32 transformer.h.1.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float32 transformer.h.1.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.2.ln_1.weight torch.Size([768])\n",
      "torch.float32 transformer.h.2.ln_1.bias torch.Size([768])\n",
      "torch.float32 transformer.h.2.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float32 transformer.h.2.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float32 transformer.h.2.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float32 transformer.h.2.attn.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.2.ln_2.weight torch.Size([768])\n",
      "torch.float32 transformer.h.2.ln_2.bias torch.Size([768])\n",
      "torch.float32 transformer.h.2.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float32 transformer.h.2.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float32 transformer.h.2.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float32 transformer.h.2.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.3.ln_1.weight torch.Size([768])\n",
      "torch.float32 transformer.h.3.ln_1.bias torch.Size([768])\n",
      "torch.float32 transformer.h.3.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float32 transformer.h.3.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float32 transformer.h.3.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float32 transformer.h.3.attn.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.3.ln_2.weight torch.Size([768])\n",
      "torch.float32 transformer.h.3.ln_2.bias torch.Size([768])\n",
      "torch.float32 transformer.h.3.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float32 transformer.h.3.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float32 transformer.h.3.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float32 transformer.h.3.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.4.ln_1.weight torch.Size([768])\n",
      "torch.float32 transformer.h.4.ln_1.bias torch.Size([768])\n",
      "torch.float32 transformer.h.4.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float32 transformer.h.4.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float32 transformer.h.4.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float32 transformer.h.4.attn.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.4.ln_2.weight torch.Size([768])\n",
      "torch.float32 transformer.h.4.ln_2.bias torch.Size([768])\n",
      "torch.float32 transformer.h.4.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float32 transformer.h.4.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float32 transformer.h.4.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float32 transformer.h.4.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.5.ln_1.weight torch.Size([768])\n",
      "torch.float32 transformer.h.5.ln_1.bias torch.Size([768])\n",
      "torch.float32 transformer.h.5.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float32 transformer.h.5.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float32 transformer.h.5.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float32 transformer.h.5.attn.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.5.ln_2.weight torch.Size([768])\n",
      "torch.float32 transformer.h.5.ln_2.bias torch.Size([768])\n",
      "torch.float32 transformer.h.5.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float32 transformer.h.5.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float32 transformer.h.5.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float32 transformer.h.5.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.6.ln_1.weight torch.Size([768])\n",
      "torch.float32 transformer.h.6.ln_1.bias torch.Size([768])\n",
      "torch.float32 transformer.h.6.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float32 transformer.h.6.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float32 transformer.h.6.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float32 transformer.h.6.attn.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.6.ln_2.weight torch.Size([768])\n",
      "torch.float32 transformer.h.6.ln_2.bias torch.Size([768])\n",
      "torch.float32 transformer.h.6.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float32 transformer.h.6.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float32 transformer.h.6.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float32 transformer.h.6.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.7.ln_1.weight torch.Size([768])\n",
      "torch.float32 transformer.h.7.ln_1.bias torch.Size([768])\n",
      "torch.float32 transformer.h.7.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float32 transformer.h.7.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float32 transformer.h.7.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float32 transformer.h.7.attn.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.7.ln_2.weight torch.Size([768])\n",
      "torch.float32 transformer.h.7.ln_2.bias torch.Size([768])\n",
      "torch.float32 transformer.h.7.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float32 transformer.h.7.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float32 transformer.h.7.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float32 transformer.h.7.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.8.ln_1.weight torch.Size([768])\n",
      "torch.float32 transformer.h.8.ln_1.bias torch.Size([768])\n",
      "torch.float32 transformer.h.8.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float32 transformer.h.8.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float32 transformer.h.8.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float32 transformer.h.8.attn.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.8.ln_2.weight torch.Size([768])\n",
      "torch.float32 transformer.h.8.ln_2.bias torch.Size([768])\n",
      "torch.float32 transformer.h.8.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float32 transformer.h.8.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float32 transformer.h.8.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float32 transformer.h.8.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.9.ln_1.weight torch.Size([768])\n",
      "torch.float32 transformer.h.9.ln_1.bias torch.Size([768])\n",
      "torch.float32 transformer.h.9.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float32 transformer.h.9.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float32 transformer.h.9.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float32 transformer.h.9.attn.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.9.ln_2.weight torch.Size([768])\n",
      "torch.float32 transformer.h.9.ln_2.bias torch.Size([768])\n",
      "torch.float32 transformer.h.9.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float32 transformer.h.9.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float32 transformer.h.9.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float32 transformer.h.9.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.10.ln_1.weight torch.Size([768])\n",
      "torch.float32 transformer.h.10.ln_1.bias torch.Size([768])\n",
      "torch.float32 transformer.h.10.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float32 transformer.h.10.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float32 transformer.h.10.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float32 transformer.h.10.attn.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.10.ln_2.weight torch.Size([768])\n",
      "torch.float32 transformer.h.10.ln_2.bias torch.Size([768])\n",
      "torch.float32 transformer.h.10.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float32 transformer.h.10.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float32 transformer.h.10.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float32 transformer.h.10.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.11.ln_1.weight torch.Size([768])\n",
      "torch.float32 transformer.h.11.ln_1.bias torch.Size([768])\n",
      "torch.float32 transformer.h.11.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float32 transformer.h.11.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float32 transformer.h.11.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float32 transformer.h.11.attn.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.h.11.ln_2.weight torch.Size([768])\n",
      "torch.float32 transformer.h.11.ln_2.bias torch.Size([768])\n",
      "torch.float32 transformer.h.11.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float32 transformer.h.11.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float32 transformer.h.11.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float32 transformer.h.11.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float32 transformer.ln_f.weight torch.Size([768])\n",
      "torch.float32 transformer.ln_f.bias torch.Size([768])\n"
     ]
    }
   ],
   "source": [
    "model = AutoModelForCausalLM.from_pretrained('gpt2')\n",
    "\n",
    "print(model.get_memory_footprint() / (1024**2))\n",
    "for name, para in model.named_parameters():\n",
    "    print(para.dtype, name, para.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "be2eb91a",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-02-23T12:04:53.772030Z",
     "start_time": "2024-02-23T12:04:11.244086Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "249.3501205444336\n",
      "torch.float16 transformer.wte.weight torch.Size([50257, 768])\n",
      "torch.float16 transformer.wpe.weight torch.Size([1024, 768])\n",
      "torch.float16 transformer.h.0.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.0.ln_1.bias torch.Size([768])\n",
      "torch.float16 transformer.h.0.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float16 transformer.h.0.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float16 transformer.h.0.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.0.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.0.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.0.ln_2.bias torch.Size([768])\n",
      "torch.float16 transformer.h.0.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.0.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float16 transformer.h.0.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.0.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.1.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.1.ln_1.bias torch.Size([768])\n",
      "torch.float16 transformer.h.1.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float16 transformer.h.1.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float16 transformer.h.1.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.1.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.1.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.1.ln_2.bias torch.Size([768])\n",
      "torch.float16 transformer.h.1.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.1.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float16 transformer.h.1.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.1.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.2.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.2.ln_1.bias torch.Size([768])\n",
      "torch.float16 transformer.h.2.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float16 transformer.h.2.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float16 transformer.h.2.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.2.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.2.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.2.ln_2.bias torch.Size([768])\n",
      "torch.float16 transformer.h.2.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.2.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float16 transformer.h.2.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.2.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.3.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.3.ln_1.bias torch.Size([768])\n",
      "torch.float16 transformer.h.3.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float16 transformer.h.3.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float16 transformer.h.3.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.3.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.3.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.3.ln_2.bias torch.Size([768])\n",
      "torch.float16 transformer.h.3.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.3.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float16 transformer.h.3.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.3.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.4.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.4.ln_1.bias torch.Size([768])\n",
      "torch.float16 transformer.h.4.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float16 transformer.h.4.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float16 transformer.h.4.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.4.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.4.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.4.ln_2.bias torch.Size([768])\n",
      "torch.float16 transformer.h.4.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.4.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float16 transformer.h.4.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.4.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.5.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.5.ln_1.bias torch.Size([768])\n",
      "torch.float16 transformer.h.5.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float16 transformer.h.5.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float16 transformer.h.5.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.5.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.5.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.5.ln_2.bias torch.Size([768])\n",
      "torch.float16 transformer.h.5.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.5.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float16 transformer.h.5.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.5.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.6.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.6.ln_1.bias torch.Size([768])\n",
      "torch.float16 transformer.h.6.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float16 transformer.h.6.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float16 transformer.h.6.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.6.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.6.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.6.ln_2.bias torch.Size([768])\n",
      "torch.float16 transformer.h.6.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.6.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float16 transformer.h.6.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.6.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.7.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.7.ln_1.bias torch.Size([768])\n",
      "torch.float16 transformer.h.7.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float16 transformer.h.7.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float16 transformer.h.7.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.7.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.7.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.7.ln_2.bias torch.Size([768])\n",
      "torch.float16 transformer.h.7.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.7.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float16 transformer.h.7.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.7.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.8.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.8.ln_1.bias torch.Size([768])\n",
      "torch.float16 transformer.h.8.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float16 transformer.h.8.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float16 transformer.h.8.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.8.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.8.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.8.ln_2.bias torch.Size([768])\n",
      "torch.float16 transformer.h.8.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.8.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float16 transformer.h.8.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.8.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.9.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.9.ln_1.bias torch.Size([768])\n",
      "torch.float16 transformer.h.9.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float16 transformer.h.9.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float16 transformer.h.9.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.9.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.9.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.9.ln_2.bias torch.Size([768])\n",
      "torch.float16 transformer.h.9.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.9.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float16 transformer.h.9.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.9.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.10.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.10.ln_1.bias torch.Size([768])\n",
      "torch.float16 transformer.h.10.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float16 transformer.h.10.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float16 transformer.h.10.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.10.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.10.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.10.ln_2.bias torch.Size([768])\n",
      "torch.float16 transformer.h.10.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.10.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float16 transformer.h.10.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.10.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.11.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.11.ln_1.bias torch.Size([768])\n",
      "torch.float16 transformer.h.11.attn.c_attn.weight torch.Size([768, 2304])\n",
      "torch.float16 transformer.h.11.attn.c_attn.bias torch.Size([2304])\n",
      "torch.float16 transformer.h.11.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.11.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.11.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.11.ln_2.bias torch.Size([768])\n",
      "torch.float16 transformer.h.11.mlp.c_fc.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.11.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.float16 transformer.h.11.mlp.c_proj.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.11.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.ln_f.weight torch.Size([768])\n",
      "torch.float16 transformer.ln_f.bias torch.Size([768])\n"
     ]
    }
   ],
   "source": [
    "model = AutoModelForCausalLM.from_pretrained('gpt2', torch_dtype=torch.float16)\n",
    "print(model.get_memory_footprint() / (1024**2))\n",
    "for name, para in model.named_parameters():\n",
    "    print(para.dtype, name, para.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "b7b32907",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-02-23T12:05:47.727982Z",
     "start_time": "2024-02-23T12:05:06.595907Z"
    },
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "168.3501205444336\n",
      "torch.float16 transformer.wte.weight torch.Size([50257, 768])\n",
      "torch.float16 transformer.wpe.weight torch.Size([1024, 768])\n",
      "torch.float16 transformer.h.0.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.0.ln_1.bias torch.Size([768])\n",
      "torch.int8 transformer.h.0.attn.c_attn.weight torch.Size([2304, 768])\n",
      "torch.float16 transformer.h.0.attn.c_attn.bias torch.Size([2304])\n",
      "torch.int8 transformer.h.0.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.0.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.0.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.0.ln_2.bias torch.Size([768])\n",
      "torch.int8 transformer.h.0.mlp.c_fc.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.0.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.int8 transformer.h.0.mlp.c_proj.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.0.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.1.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.1.ln_1.bias torch.Size([768])\n",
      "torch.int8 transformer.h.1.attn.c_attn.weight torch.Size([2304, 768])\n",
      "torch.float16 transformer.h.1.attn.c_attn.bias torch.Size([2304])\n",
      "torch.int8 transformer.h.1.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.1.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.1.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.1.ln_2.bias torch.Size([768])\n",
      "torch.int8 transformer.h.1.mlp.c_fc.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.1.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.int8 transformer.h.1.mlp.c_proj.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.1.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.2.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.2.ln_1.bias torch.Size([768])\n",
      "torch.int8 transformer.h.2.attn.c_attn.weight torch.Size([2304, 768])\n",
      "torch.float16 transformer.h.2.attn.c_attn.bias torch.Size([2304])\n",
      "torch.int8 transformer.h.2.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.2.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.2.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.2.ln_2.bias torch.Size([768])\n",
      "torch.int8 transformer.h.2.mlp.c_fc.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.2.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.int8 transformer.h.2.mlp.c_proj.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.2.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.3.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.3.ln_1.bias torch.Size([768])\n",
      "torch.int8 transformer.h.3.attn.c_attn.weight torch.Size([2304, 768])\n",
      "torch.float16 transformer.h.3.attn.c_attn.bias torch.Size([2304])\n",
      "torch.int8 transformer.h.3.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.3.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.3.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.3.ln_2.bias torch.Size([768])\n",
      "torch.int8 transformer.h.3.mlp.c_fc.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.3.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.int8 transformer.h.3.mlp.c_proj.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.3.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.4.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.4.ln_1.bias torch.Size([768])\n",
      "torch.int8 transformer.h.4.attn.c_attn.weight torch.Size([2304, 768])\n",
      "torch.float16 transformer.h.4.attn.c_attn.bias torch.Size([2304])\n",
      "torch.int8 transformer.h.4.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.4.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.4.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.4.ln_2.bias torch.Size([768])\n",
      "torch.int8 transformer.h.4.mlp.c_fc.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.4.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.int8 transformer.h.4.mlp.c_proj.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.4.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.5.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.5.ln_1.bias torch.Size([768])\n",
      "torch.int8 transformer.h.5.attn.c_attn.weight torch.Size([2304, 768])\n",
      "torch.float16 transformer.h.5.attn.c_attn.bias torch.Size([2304])\n",
      "torch.int8 transformer.h.5.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.5.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.5.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.5.ln_2.bias torch.Size([768])\n",
      "torch.int8 transformer.h.5.mlp.c_fc.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.5.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.int8 transformer.h.5.mlp.c_proj.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.5.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.6.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.6.ln_1.bias torch.Size([768])\n",
      "torch.int8 transformer.h.6.attn.c_attn.weight torch.Size([2304, 768])\n",
      "torch.float16 transformer.h.6.attn.c_attn.bias torch.Size([2304])\n",
      "torch.int8 transformer.h.6.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.6.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.6.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.6.ln_2.bias torch.Size([768])\n",
      "torch.int8 transformer.h.6.mlp.c_fc.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.6.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.int8 transformer.h.6.mlp.c_proj.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.6.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.7.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.7.ln_1.bias torch.Size([768])\n",
      "torch.int8 transformer.h.7.attn.c_attn.weight torch.Size([2304, 768])\n",
      "torch.float16 transformer.h.7.attn.c_attn.bias torch.Size([2304])\n",
      "torch.int8 transformer.h.7.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.7.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.7.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.7.ln_2.bias torch.Size([768])\n",
      "torch.int8 transformer.h.7.mlp.c_fc.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.7.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.int8 transformer.h.7.mlp.c_proj.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.7.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.8.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.8.ln_1.bias torch.Size([768])\n",
      "torch.int8 transformer.h.8.attn.c_attn.weight torch.Size([2304, 768])\n",
      "torch.float16 transformer.h.8.attn.c_attn.bias torch.Size([2304])\n",
      "torch.int8 transformer.h.8.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.8.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.8.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.8.ln_2.bias torch.Size([768])\n",
      "torch.int8 transformer.h.8.mlp.c_fc.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.8.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.int8 transformer.h.8.mlp.c_proj.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.8.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.9.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.9.ln_1.bias torch.Size([768])\n",
      "torch.int8 transformer.h.9.attn.c_attn.weight torch.Size([2304, 768])\n",
      "torch.float16 transformer.h.9.attn.c_attn.bias torch.Size([2304])\n",
      "torch.int8 transformer.h.9.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.9.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.9.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.9.ln_2.bias torch.Size([768])\n",
      "torch.int8 transformer.h.9.mlp.c_fc.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.9.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.int8 transformer.h.9.mlp.c_proj.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.9.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.10.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.10.ln_1.bias torch.Size([768])\n",
      "torch.int8 transformer.h.10.attn.c_attn.weight torch.Size([2304, 768])\n",
      "torch.float16 transformer.h.10.attn.c_attn.bias torch.Size([2304])\n",
      "torch.int8 transformer.h.10.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.10.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.10.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.10.ln_2.bias torch.Size([768])\n",
      "torch.int8 transformer.h.10.mlp.c_fc.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.10.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.int8 transformer.h.10.mlp.c_proj.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.10.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.11.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.11.ln_1.bias torch.Size([768])\n",
      "torch.int8 transformer.h.11.attn.c_attn.weight torch.Size([2304, 768])\n",
      "torch.float16 transformer.h.11.attn.c_attn.bias torch.Size([2304])\n",
      "torch.int8 transformer.h.11.attn.c_proj.weight torch.Size([768, 768])\n",
      "torch.float16 transformer.h.11.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.11.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.11.ln_2.bias torch.Size([768])\n",
      "torch.int8 transformer.h.11.mlp.c_fc.weight torch.Size([3072, 768])\n",
      "torch.float16 transformer.h.11.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.int8 transformer.h.11.mlp.c_proj.weight torch.Size([768, 3072])\n",
      "torch.float16 transformer.h.11.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.ln_f.weight torch.Size([768])\n",
      "torch.float16 transformer.ln_f.bias torch.Size([768])\n"
     ]
    }
   ],
   "source": [
    "# Load GPT-2 with bitsandbytes 8-bit weight quantization; non-quantized params\n",
    "# (LayerNorms, biases, embeddings) are kept in float16, as the dump below shows.\n",
    "# NOTE(review): passing `load_in_8bit=True` directly is deprecated in newer\n",
    "# transformers — prefer quantization_config=BitsAndBytesConfig(load_in_8bit=True).\n",
    "model = AutoModelForCausalLM.from_pretrained('gpt2', torch_dtype=torch.float16, load_in_8bit=True)\n",
    "# Model size in MiB.\n",
    "print(model.get_memory_footprint() / (1024**2))\n",
    "# Dump dtype/name/shape of every parameter to see which tensors got quantized.\n",
    "for name, para in model.named_parameters():\n",
    "    print(para.dtype, name, para.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7f698114",
   "metadata": {},
   "source": [
    "- transformer.wte.weight, transformer.wpe.weight: torch.float16\n",
    "- h.0 - h.11\n",
    "    - ln_1.weight, ln_1.bias, ln_2.weight, ln_2.bias: torch.float16\n",
    "    - attn\n",
    "        - c_attn.weight: torch.int8\n",
    "            - bias: torch.float16\n",
    "        - c_proj.weight: torch.int8\n",
    "            - bias: torch.float16\n",
    "    - mlp\n",
    "        - c_fc.weight: torch.int8\n",
    "            - bias: torch.float16\n",
    "        - c_proj.weight: torch.int8\n",
    "            - bias: torch.float16\n",
    "- ln_f.weight, ln_f.bias: torch.float16"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "70e0bf6f",
   "metadata": {},
   "source": [
    "## bitsandbytes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "0c844fc8",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-03-17T04:23:38.732413Z",
     "start_time": "2024-03-17T04:23:38.716504Z"
    }
   },
   "outputs": [],
   "source": [
    "from transformers import BitsAndBytesConfig"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "fc17c727",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-03-17T04:23:40.416479Z",
     "start_time": "2024-03-17T04:23:40.405041Z"
    }
   },
   "outputs": [],
   "source": [
    "# 4-bit quantization config: weights stored in nf4, matmuls computed in bfloat16.\n",
    "bnb_config = BitsAndBytesConfig(\n",
    "    load_in_4bit=True,  # store linear-layer weights in 4 bits\n",
    "    bnb_4bit_quant_type=\"nf4\",  # NormalFloat4 quantization (alternative: \"fp4\")\n",
    "    bnb_4bit_compute_dtype=torch.bfloat16,  # dequantize to bf16 for compute\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "cf6208c7",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-03-17T04:24:52.283899Z",
     "start_time": "2024-03-17T04:24:05.575929Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/whaow/workspaces/learning/transformers/src/transformers/models/auto/auto_factory.py:472: FutureWarning: The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2024-03-17 12:24:27,303] [INFO] [real_accelerator.py:161:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n"
     ]
    }
   ],
   "source": [
    "# Load GPT-2 with the 4-bit (nf4) quantization config defined above,\n",
    "# placing the whole model on GPU 0.\n",
    "# Fix: `use_auth_token` is deprecated (see the FutureWarning this cell\n",
    "# emitted) — transformers asks for `token` instead.\n",
    "base_model = AutoModelForCausalLM.from_pretrained(\n",
    "    'gpt2',\n",
    "    quantization_config=bnb_config,\n",
    "    device_map={\"\": 0},  # '' = root module -> everything on cuda:0\n",
    "    trust_remote_code=True,\n",
    "    token=True,  # use the locally cached Hugging Face auth token\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "bbf022f0",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-03-17T04:25:03.002266Z",
     "start_time": "2024-03-17T04:25:02.985340Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "127.8501205444336\n",
      "torch.float16 transformer.wte.weight torch.Size([50257, 768])\n",
      "torch.float16 transformer.wpe.weight torch.Size([1024, 768])\n",
      "torch.float16 transformer.h.0.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.0.ln_1.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.0.attn.c_attn.weight torch.Size([884736, 1])\n",
      "torch.float16 transformer.h.0.attn.c_attn.bias torch.Size([2304])\n",
      "torch.uint8 transformer.h.0.attn.c_proj.weight torch.Size([294912, 1])\n",
      "torch.float16 transformer.h.0.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.0.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.0.ln_2.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.0.mlp.c_fc.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.0.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.uint8 transformer.h.0.mlp.c_proj.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.0.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.1.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.1.ln_1.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.1.attn.c_attn.weight torch.Size([884736, 1])\n",
      "torch.float16 transformer.h.1.attn.c_attn.bias torch.Size([2304])\n",
      "torch.uint8 transformer.h.1.attn.c_proj.weight torch.Size([294912, 1])\n",
      "torch.float16 transformer.h.1.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.1.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.1.ln_2.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.1.mlp.c_fc.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.1.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.uint8 transformer.h.1.mlp.c_proj.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.1.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.2.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.2.ln_1.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.2.attn.c_attn.weight torch.Size([884736, 1])\n",
      "torch.float16 transformer.h.2.attn.c_attn.bias torch.Size([2304])\n",
      "torch.uint8 transformer.h.2.attn.c_proj.weight torch.Size([294912, 1])\n",
      "torch.float16 transformer.h.2.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.2.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.2.ln_2.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.2.mlp.c_fc.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.2.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.uint8 transformer.h.2.mlp.c_proj.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.2.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.3.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.3.ln_1.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.3.attn.c_attn.weight torch.Size([884736, 1])\n",
      "torch.float16 transformer.h.3.attn.c_attn.bias torch.Size([2304])\n",
      "torch.uint8 transformer.h.3.attn.c_proj.weight torch.Size([294912, 1])\n",
      "torch.float16 transformer.h.3.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.3.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.3.ln_2.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.3.mlp.c_fc.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.3.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.uint8 transformer.h.3.mlp.c_proj.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.3.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.4.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.4.ln_1.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.4.attn.c_attn.weight torch.Size([884736, 1])\n",
      "torch.float16 transformer.h.4.attn.c_attn.bias torch.Size([2304])\n",
      "torch.uint8 transformer.h.4.attn.c_proj.weight torch.Size([294912, 1])\n",
      "torch.float16 transformer.h.4.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.4.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.4.ln_2.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.4.mlp.c_fc.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.4.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.uint8 transformer.h.4.mlp.c_proj.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.4.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.5.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.5.ln_1.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.5.attn.c_attn.weight torch.Size([884736, 1])\n",
      "torch.float16 transformer.h.5.attn.c_attn.bias torch.Size([2304])\n",
      "torch.uint8 transformer.h.5.attn.c_proj.weight torch.Size([294912, 1])\n",
      "torch.float16 transformer.h.5.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.5.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.5.ln_2.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.5.mlp.c_fc.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.5.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.uint8 transformer.h.5.mlp.c_proj.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.5.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.6.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.6.ln_1.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.6.attn.c_attn.weight torch.Size([884736, 1])\n",
      "torch.float16 transformer.h.6.attn.c_attn.bias torch.Size([2304])\n",
      "torch.uint8 transformer.h.6.attn.c_proj.weight torch.Size([294912, 1])\n",
      "torch.float16 transformer.h.6.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.6.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.6.ln_2.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.6.mlp.c_fc.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.6.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.uint8 transformer.h.6.mlp.c_proj.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.6.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.7.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.7.ln_1.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.7.attn.c_attn.weight torch.Size([884736, 1])\n",
      "torch.float16 transformer.h.7.attn.c_attn.bias torch.Size([2304])\n",
      "torch.uint8 transformer.h.7.attn.c_proj.weight torch.Size([294912, 1])\n",
      "torch.float16 transformer.h.7.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.7.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.7.ln_2.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.7.mlp.c_fc.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.7.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.uint8 transformer.h.7.mlp.c_proj.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.7.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.8.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.8.ln_1.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.8.attn.c_attn.weight torch.Size([884736, 1])\n",
      "torch.float16 transformer.h.8.attn.c_attn.bias torch.Size([2304])\n",
      "torch.uint8 transformer.h.8.attn.c_proj.weight torch.Size([294912, 1])\n",
      "torch.float16 transformer.h.8.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.8.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.8.ln_2.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.8.mlp.c_fc.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.8.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.uint8 transformer.h.8.mlp.c_proj.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.8.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.9.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.9.ln_1.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.9.attn.c_attn.weight torch.Size([884736, 1])\n",
      "torch.float16 transformer.h.9.attn.c_attn.bias torch.Size([2304])\n",
      "torch.uint8 transformer.h.9.attn.c_proj.weight torch.Size([294912, 1])\n",
      "torch.float16 transformer.h.9.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.9.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.9.ln_2.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.9.mlp.c_fc.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.9.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.uint8 transformer.h.9.mlp.c_proj.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.9.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.10.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.10.ln_1.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.10.attn.c_attn.weight torch.Size([884736, 1])\n",
      "torch.float16 transformer.h.10.attn.c_attn.bias torch.Size([2304])\n",
      "torch.uint8 transformer.h.10.attn.c_proj.weight torch.Size([294912, 1])\n",
      "torch.float16 transformer.h.10.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.10.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.10.ln_2.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.10.mlp.c_fc.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.10.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.uint8 transformer.h.10.mlp.c_proj.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.10.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.11.ln_1.weight torch.Size([768])\n",
      "torch.float16 transformer.h.11.ln_1.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.11.attn.c_attn.weight torch.Size([884736, 1])\n",
      "torch.float16 transformer.h.11.attn.c_attn.bias torch.Size([2304])\n",
      "torch.uint8 transformer.h.11.attn.c_proj.weight torch.Size([294912, 1])\n",
      "torch.float16 transformer.h.11.attn.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.h.11.ln_2.weight torch.Size([768])\n",
      "torch.float16 transformer.h.11.ln_2.bias torch.Size([768])\n",
      "torch.uint8 transformer.h.11.mlp.c_fc.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.11.mlp.c_fc.bias torch.Size([3072])\n",
      "torch.uint8 transformer.h.11.mlp.c_proj.weight torch.Size([1179648, 1])\n",
      "torch.float16 transformer.h.11.mlp.c_proj.bias torch.Size([768])\n",
      "torch.float16 transformer.ln_f.weight torch.Size([768])\n",
      "torch.float16 transformer.ln_f.bias torch.Size([768])\n"
     ]
    }
   ],
   "source": [
    "# Model size in MiB. Note the 4-bit weights are stored packed into uint8\n",
    "# tensors (two 4-bit values per byte), hence the flattened [N, 1] shapes below.\n",
    "print(base_model.get_memory_footprint() / (1024**2))\n",
    "# Dump dtype/name/shape of every parameter to see which tensors got quantized.\n",
    "for name, para in base_model.named_parameters():\n",
    "    print(para.dtype, name, para.shape)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
