{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 关于Share Parameter的MoE\n",
    "1. 与大邹老师关于实现方式上产生了分歧\n",
    "2. 李老师对于显存占用的问题\n",
    "3. 寻找实现了的ViT库\n",
    "4. 使用更加复杂的数据集\n",
    "5. ~~与Vanilla ViT相比，我们这里用的是ReLU做Activation，它用GeLU~~\n",
    "\n",
    "https://arxiv.org/pdf/2309.04354.pdf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "sys.path.append(\"../\")\n",
    "from models.vit_moe import ViTMoE\n",
    "from models.vit_shareparam import ViTMoEShareParam\n",
    "from models.mixture_of_experts import MoE,MoEShareParam\n",
    "from utils import print_parameters\n",
    "moe_vit_shareparam = ViTMoEShareParam(share_dim=384)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Param Name:cls_token shape:torch.Size([1, 1, 768])\n",
      "Param Name:pos_embed shape:torch.Size([1, 197, 768])\n",
      "Param Name:patch_embed.proj.weight shape:torch.Size([768, 3, 16, 16])\n",
      "Param Name:patch_embed.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.0.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.0.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.0.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.0.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.0.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.0.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.0.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.0.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.0.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.0.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.0.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.0.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.0.mlp.gate.w_gating shape:torch.Size([384, 16])\n",
      "Param Name:blocks.0.mlp.experts.w1 shape:torch.Size([16, 384, 3072])\n",
      "Param Name:blocks.0.mlp.experts.w2 shape:torch.Size([16, 3072, 384])\n",
      "Param Name:blocks.1.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.1.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.1.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.1.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.1.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.1.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.1.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.1.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.1.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.1.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.1.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.1.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.1.mlp.gate.w_gating shape:torch.Size([384, 16])\n",
      "Param Name:blocks.1.mlp.experts.w1 shape:torch.Size([16, 384, 3072])\n",
      "Param Name:blocks.1.mlp.experts.w2 shape:torch.Size([16, 3072, 384])\n",
      "Param Name:blocks.2.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.2.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.2.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.2.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.2.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.2.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.2.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.2.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.2.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.2.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.2.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.2.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.2.mlp.gate.w_gating shape:torch.Size([384, 16])\n",
      "Param Name:blocks.2.mlp.experts.w1 shape:torch.Size([16, 384, 3072])\n",
      "Param Name:blocks.2.mlp.experts.w2 shape:torch.Size([16, 3072, 384])\n",
      "Param Name:blocks.3.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.3.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.3.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.3.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.3.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.3.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.3.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.3.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.3.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.3.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.3.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.3.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.3.mlp.gate.w_gating shape:torch.Size([384, 16])\n",
      "Param Name:blocks.3.mlp.experts.w1 shape:torch.Size([16, 384, 3072])\n",
      "Param Name:blocks.3.mlp.experts.w2 shape:torch.Size([16, 3072, 384])\n",
      "Param Name:blocks.4.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.4.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.4.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.4.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.4.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.4.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.4.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.4.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.4.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.4.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.4.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.4.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.4.mlp.gate.w_gating shape:torch.Size([384, 16])\n",
      "Param Name:blocks.4.mlp.experts.w1 shape:torch.Size([16, 384, 3072])\n",
      "Param Name:blocks.4.mlp.experts.w2 shape:torch.Size([16, 3072, 384])\n",
      "Param Name:blocks.5.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.5.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.5.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.5.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.5.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.5.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.5.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.5.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.5.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.5.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.5.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.5.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.5.mlp.gate.w_gating shape:torch.Size([384, 16])\n",
      "Param Name:blocks.5.mlp.experts.w1 shape:torch.Size([16, 384, 3072])\n",
      "Param Name:blocks.5.mlp.experts.w2 shape:torch.Size([16, 3072, 384])\n",
      "Param Name:blocks.6.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.6.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.6.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.6.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.6.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.6.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.6.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.6.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.6.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.6.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.6.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.6.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.6.mlp.gate.w_gating shape:torch.Size([384, 16])\n",
      "Param Name:blocks.6.mlp.experts.w1 shape:torch.Size([16, 384, 3072])\n",
      "Param Name:blocks.6.mlp.experts.w2 shape:torch.Size([16, 3072, 384])\n",
      "Param Name:blocks.7.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.7.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.7.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.7.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.7.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.7.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.7.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.7.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.7.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.7.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.7.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.7.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.7.mlp.gate.w_gating shape:torch.Size([384, 16])\n",
      "Param Name:blocks.7.mlp.experts.w1 shape:torch.Size([16, 384, 3072])\n",
      "Param Name:blocks.7.mlp.experts.w2 shape:torch.Size([16, 3072, 384])\n",
      "Param Name:blocks.8.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.8.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.8.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.8.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.8.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.8.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.8.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.8.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.8.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.8.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.8.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.8.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.8.mlp.gate.w_gating shape:torch.Size([384, 16])\n",
      "Param Name:blocks.8.mlp.experts.w1 shape:torch.Size([16, 384, 3072])\n",
      "Param Name:blocks.8.mlp.experts.w2 shape:torch.Size([16, 3072, 384])\n",
      "Param Name:blocks.9.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.9.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.9.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.9.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.9.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.9.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.9.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.9.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.9.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.9.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.9.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.9.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.9.mlp.gate.w_gating shape:torch.Size([384, 16])\n",
      "Param Name:blocks.9.mlp.experts.w1 shape:torch.Size([16, 384, 3072])\n",
      "Param Name:blocks.9.mlp.experts.w2 shape:torch.Size([16, 3072, 384])\n",
      "Param Name:blocks.10.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.10.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.10.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.10.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.10.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.10.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.10.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.10.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.10.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.10.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.10.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.10.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.10.mlp.gate.w_gating shape:torch.Size([384, 16])\n",
      "Param Name:blocks.10.mlp.experts.w1 shape:torch.Size([16, 384, 3072])\n",
      "Param Name:blocks.10.mlp.experts.w2 shape:torch.Size([16, 3072, 384])\n",
      "Param Name:blocks.11.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.11.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.11.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.11.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.11.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.11.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.11.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.11.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.11.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.11.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.11.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.11.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.11.mlp.gate.w_gating shape:torch.Size([384, 16])\n",
      "Param Name:blocks.11.mlp.experts.w1 shape:torch.Size([16, 384, 3072])\n",
      "Param Name:blocks.11.mlp.experts.w2 shape:torch.Size([16, 3072, 384])\n",
      "Param Name:norm.weight shape:torch.Size([768])\n",
      "Param Name:norm.bias shape:torch.Size([768])\n",
      "Param Name:head.weight shape:torch.Size([1000, 768])\n",
      "Param Name:head.bias shape:torch.Size([1000])\n",
      "Total parameters: 511.310056 Millon\n"
     ]
    }
   ],
   "source": [
    "print_parameters(moe_vit_shareparam)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1. 关于实现方式上面的分歧"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "moe_sp = MoEShareParam(dim=512,share_dim=256,num_experts=4,hidden_dim=4*512)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "MoEShareParam(\n",
       "  (mlp): Sequential(\n",
       "    (0): Linear(in_features=256, out_features=2048, bias=True)\n",
       "    (1): ReLU()\n",
       "    (2): Linear(in_features=2048, out_features=256, bias=True)\n",
       "  )\n",
       "  (gate): Top2Gating()\n",
       "  (experts): Experts(\n",
       "    (act): ReLU()\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "moe_sp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Param Name:mlp.0.weight shape:torch.Size([2048, 256])\n",
      "Param Name:mlp.0.bias shape:torch.Size([2048])\n",
      "Param Name:mlp.2.weight shape:torch.Size([256, 2048])\n",
      "Param Name:mlp.2.bias shape:torch.Size([256])\n",
      "Param Name:gate.w_gating shape:torch.Size([256, 4])\n",
      "Param Name:experts.w1 shape:torch.Size([4, 256, 2048])\n",
      "Param Name:experts.w2 shape:torch.Size([4, 2048, 256])\n",
      "Total parameters: 5.246208 Millon\n"
     ]
    }
   ],
   "source": [
    "print_parameters(moe_sp)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "实际上，FFN包含2个线性层，这里是源代码\n",
    "```python\n",
    "class Block(nn.Module):\n",
    "    def __init__(\n",
    "            self,\n",
    "            dim,\n",
    "            num_heads,\n",
    "            mlp_ratio=4.,\n",
    "            qkv_bias=False,\n",
    "            qk_norm=False,\n",
    "            proj_drop=0.,\n",
    "            attn_drop=0.,\n",
    "            init_values=None,\n",
    "            drop_path=0.,\n",
    "            act_layer=nn.GELU,\n",
    "            norm_layer=nn.LayerNorm,\n",
    "            mlp_layer=Mlp,\n",
    "    ):\n",
    "```\n",
    "我们默认是`mlp_ratio=4`，那么以ViTBasePatch16Size224举例，MLP由2层组成：\n",
    "\n",
    "$$\n",
    "Y_1 = X W_1 + b_1,\\quad W_1 \\in \\mathbb{R}^{768\\times 3072}\\\\\n",
    "Y_2 = Y_1 W_2 + b_2,\\quad W_2 \\in \\mathbb{R}^{3072\\times 768}\n",
    "$$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我的做法：\n",
    "1. $X$对半开分成$X,X^\\prime$\n",
    "2. 假设Share一半的Param，$W_1 \\in \\mathbb{R}^{384 \\times 3072},W_2 \\in \\mathbb{R}^{3072 \\times 384}$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2. 显存占用问题"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Param Name:cls_token shape:torch.Size([1, 1, 768])\n",
      "Param Name:pos_embed shape:torch.Size([1, 197, 768])\n",
      "Param Name:patch_embed.proj.weight shape:torch.Size([768, 3, 16, 16])\n",
      "Param Name:patch_embed.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.0.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.0.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.0.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.0.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.0.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.0.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.0.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.0.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.0.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.0.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.0.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.0.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.0.mlp.gate.w_gating shape:torch.Size([384, 7])\n",
      "Param Name:blocks.0.mlp.experts.w1 shape:torch.Size([7, 384, 3072])\n",
      "Param Name:blocks.0.mlp.experts.w2 shape:torch.Size([7, 3072, 384])\n",
      "Param Name:blocks.1.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.1.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.1.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.1.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.1.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.1.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.1.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.1.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.1.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.1.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.1.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.1.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.1.mlp.gate.w_gating shape:torch.Size([384, 7])\n",
      "Param Name:blocks.1.mlp.experts.w1 shape:torch.Size([7, 384, 3072])\n",
      "Param Name:blocks.1.mlp.experts.w2 shape:torch.Size([7, 3072, 384])\n",
      "Param Name:blocks.2.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.2.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.2.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.2.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.2.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.2.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.2.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.2.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.2.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.2.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.2.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.2.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.2.mlp.gate.w_gating shape:torch.Size([384, 7])\n",
      "Param Name:blocks.2.mlp.experts.w1 shape:torch.Size([7, 384, 3072])\n",
      "Param Name:blocks.2.mlp.experts.w2 shape:torch.Size([7, 3072, 384])\n",
      "Param Name:blocks.3.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.3.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.3.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.3.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.3.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.3.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.3.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.3.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.3.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.3.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.3.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.3.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.3.mlp.gate.w_gating shape:torch.Size([384, 7])\n",
      "Param Name:blocks.3.mlp.experts.w1 shape:torch.Size([7, 384, 3072])\n",
      "Param Name:blocks.3.mlp.experts.w2 shape:torch.Size([7, 3072, 384])\n",
      "Param Name:blocks.4.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.4.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.4.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.4.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.4.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.4.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.4.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.4.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.4.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.4.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.4.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.4.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.4.mlp.gate.w_gating shape:torch.Size([384, 7])\n",
      "Param Name:blocks.4.mlp.experts.w1 shape:torch.Size([7, 384, 3072])\n",
      "Param Name:blocks.4.mlp.experts.w2 shape:torch.Size([7, 3072, 384])\n",
      "Param Name:blocks.5.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.5.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.5.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.5.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.5.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.5.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.5.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.5.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.5.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.5.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.5.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.5.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.5.mlp.gate.w_gating shape:torch.Size([384, 7])\n",
      "Param Name:blocks.5.mlp.experts.w1 shape:torch.Size([7, 384, 3072])\n",
      "Param Name:blocks.5.mlp.experts.w2 shape:torch.Size([7, 3072, 384])\n",
      "Param Name:blocks.6.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.6.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.6.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.6.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.6.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.6.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.6.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.6.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.6.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.6.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.6.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.6.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.6.mlp.gate.w_gating shape:torch.Size([384, 7])\n",
      "Param Name:blocks.6.mlp.experts.w1 shape:torch.Size([7, 384, 3072])\n",
      "Param Name:blocks.6.mlp.experts.w2 shape:torch.Size([7, 3072, 384])\n",
      "Param Name:blocks.7.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.7.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.7.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.7.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.7.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.7.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.7.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.7.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.7.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.7.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.7.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.7.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.7.mlp.gate.w_gating shape:torch.Size([384, 7])\n",
      "Param Name:blocks.7.mlp.experts.w1 shape:torch.Size([7, 384, 3072])\n",
      "Param Name:blocks.7.mlp.experts.w2 shape:torch.Size([7, 3072, 384])\n",
      "Param Name:blocks.8.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.8.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.8.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.8.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.8.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.8.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.8.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.8.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.8.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.8.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.8.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.8.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.8.mlp.gate.w_gating shape:torch.Size([384, 7])\n",
      "Param Name:blocks.8.mlp.experts.w1 shape:torch.Size([7, 384, 3072])\n",
      "Param Name:blocks.8.mlp.experts.w2 shape:torch.Size([7, 3072, 384])\n",
      "Param Name:blocks.9.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.9.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.9.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.9.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.9.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.9.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.9.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.9.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.9.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.9.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.9.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.9.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.9.mlp.gate.w_gating shape:torch.Size([384, 7])\n",
      "Param Name:blocks.9.mlp.experts.w1 shape:torch.Size([7, 384, 3072])\n",
      "Param Name:blocks.9.mlp.experts.w2 shape:torch.Size([7, 3072, 384])\n",
      "Param Name:blocks.10.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.10.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.10.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.10.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.10.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.10.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.10.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.10.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.10.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.10.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.10.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.10.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.10.mlp.gate.w_gating shape:torch.Size([384, 7])\n",
      "Param Name:blocks.10.mlp.experts.w1 shape:torch.Size([7, 384, 3072])\n",
      "Param Name:blocks.10.mlp.experts.w2 shape:torch.Size([7, 3072, 384])\n",
      "Param Name:blocks.11.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.11.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.11.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.11.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.11.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.11.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.11.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.11.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.11.mlp.mlp.0.weight shape:torch.Size([3072, 384])\n",
      "Param Name:blocks.11.mlp.mlp.0.bias shape:torch.Size([3072])\n",
      "Param Name:blocks.11.mlp.mlp.2.weight shape:torch.Size([384, 3072])\n",
      "Param Name:blocks.11.mlp.mlp.2.bias shape:torch.Size([384])\n",
      "Param Name:blocks.11.mlp.gate.w_gating shape:torch.Size([384, 7])\n",
      "Param Name:blocks.11.mlp.experts.w1 shape:torch.Size([7, 384, 3072])\n",
      "Param Name:blocks.11.mlp.experts.w2 shape:torch.Size([7, 3072, 384])\n",
      "Param Name:norm.weight shape:torch.Size([768])\n",
      "Param Name:norm.bias shape:torch.Size([768])\n",
      "Param Name:head.weight shape:torch.Size([1000, 768])\n",
      "Param Name:head.bias shape:torch.Size([1000])\n",
      "Total parameters: 256.464616 Millon\n"
     ]
    }
   ],
   "source": [
    "print_parameters(ViTMoEShareParam(num_experts=7))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Param Name:cls_token shape:torch.Size([1, 1, 768])\n",
      "Param Name:pos_embed shape:torch.Size([1, 197, 768])\n",
      "Param Name:patch_embed.proj.weight shape:torch.Size([768, 3, 16, 16])\n",
      "Param Name:patch_embed.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.0.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.0.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.0.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.0.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.0.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.0.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.0.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.0.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.0.mlp.gate.w_gating shape:torch.Size([768, 4])\n",
      "Param Name:blocks.0.mlp.experts.w1 shape:torch.Size([4, 768, 3072])\n",
      "Param Name:blocks.0.mlp.experts.w2 shape:torch.Size([4, 3072, 768])\n",
      "Param Name:blocks.1.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.1.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.1.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.1.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.1.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.1.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.1.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.1.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.1.mlp.gate.w_gating shape:torch.Size([768, 4])\n",
      "Param Name:blocks.1.mlp.experts.w1 shape:torch.Size([4, 768, 3072])\n",
      "Param Name:blocks.1.mlp.experts.w2 shape:torch.Size([4, 3072, 768])\n",
      "Param Name:blocks.2.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.2.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.2.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.2.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.2.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.2.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.2.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.2.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.2.mlp.gate.w_gating shape:torch.Size([768, 4])\n",
      "Param Name:blocks.2.mlp.experts.w1 shape:torch.Size([4, 768, 3072])\n",
      "Param Name:blocks.2.mlp.experts.w2 shape:torch.Size([4, 3072, 768])\n",
      "Param Name:blocks.3.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.3.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.3.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.3.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.3.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.3.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.3.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.3.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.3.mlp.gate.w_gating shape:torch.Size([768, 4])\n",
      "Param Name:blocks.3.mlp.experts.w1 shape:torch.Size([4, 768, 3072])\n",
      "Param Name:blocks.3.mlp.experts.w2 shape:torch.Size([4, 3072, 768])\n",
      "Param Name:blocks.4.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.4.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.4.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.4.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.4.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.4.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.4.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.4.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.4.mlp.gate.w_gating shape:torch.Size([768, 4])\n",
      "Param Name:blocks.4.mlp.experts.w1 shape:torch.Size([4, 768, 3072])\n",
      "Param Name:blocks.4.mlp.experts.w2 shape:torch.Size([4, 3072, 768])\n",
      "Param Name:blocks.5.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.5.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.5.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.5.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.5.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.5.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.5.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.5.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.5.mlp.gate.w_gating shape:torch.Size([768, 4])\n",
      "Param Name:blocks.5.mlp.experts.w1 shape:torch.Size([4, 768, 3072])\n",
      "Param Name:blocks.5.mlp.experts.w2 shape:torch.Size([4, 3072, 768])\n",
      "Param Name:blocks.6.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.6.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.6.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.6.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.6.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.6.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.6.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.6.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.6.mlp.gate.w_gating shape:torch.Size([768, 4])\n",
      "Param Name:blocks.6.mlp.experts.w1 shape:torch.Size([4, 768, 3072])\n",
      "Param Name:blocks.6.mlp.experts.w2 shape:torch.Size([4, 3072, 768])\n",
      "Param Name:blocks.7.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.7.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.7.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.7.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.7.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.7.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.7.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.7.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.7.mlp.gate.w_gating shape:torch.Size([768, 4])\n",
      "Param Name:blocks.7.mlp.experts.w1 shape:torch.Size([4, 768, 3072])\n",
      "Param Name:blocks.7.mlp.experts.w2 shape:torch.Size([4, 3072, 768])\n",
      "Param Name:blocks.8.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.8.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.8.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.8.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.8.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.8.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.8.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.8.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.8.mlp.gate.w_gating shape:torch.Size([768, 4])\n",
      "Param Name:blocks.8.mlp.experts.w1 shape:torch.Size([4, 768, 3072])\n",
      "Param Name:blocks.8.mlp.experts.w2 shape:torch.Size([4, 3072, 768])\n",
      "Param Name:blocks.9.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.9.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.9.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.9.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.9.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.9.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.9.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.9.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.9.mlp.gate.w_gating shape:torch.Size([768, 4])\n",
      "Param Name:blocks.9.mlp.experts.w1 shape:torch.Size([4, 768, 3072])\n",
      "Param Name:blocks.9.mlp.experts.w2 shape:torch.Size([4, 3072, 768])\n",
      "Param Name:blocks.10.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.10.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.10.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.10.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.10.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.10.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.10.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.10.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.10.mlp.gate.w_gating shape:torch.Size([768, 4])\n",
      "Param Name:blocks.10.mlp.experts.w1 shape:torch.Size([4, 768, 3072])\n",
      "Param Name:blocks.10.mlp.experts.w2 shape:torch.Size([4, 3072, 768])\n",
      "Param Name:blocks.11.norm1.weight shape:torch.Size([768])\n",
      "Param Name:blocks.11.norm1.bias shape:torch.Size([768])\n",
      "Param Name:blocks.11.attn.qkv.weight shape:torch.Size([2304, 768])\n",
      "Param Name:blocks.11.attn.qkv.bias shape:torch.Size([2304])\n",
      "Param Name:blocks.11.attn.proj.weight shape:torch.Size([768, 768])\n",
      "Param Name:blocks.11.attn.proj.bias shape:torch.Size([768])\n",
      "Param Name:blocks.11.norm2.weight shape:torch.Size([768])\n",
      "Param Name:blocks.11.norm2.bias shape:torch.Size([768])\n",
      "Param Name:blocks.11.mlp.gate.w_gating shape:torch.Size([768, 4])\n",
      "Param Name:blocks.11.mlp.experts.w1 shape:torch.Size([4, 768, 3072])\n",
      "Param Name:blocks.11.mlp.experts.w2 shape:torch.Size([4, 3072, 768])\n",
      "Param Name:norm.weight shape:torch.Size([768])\n",
      "Param Name:norm.bias shape:torch.Size([768])\n",
      "Param Name:head.weight shape:torch.Size([1000, 768])\n",
      "Param Name:head.bias shape:torch.Size([1000])\n",
      "Total parameters: 256.427752 Millon\n"
     ]
    }
   ],
   "source": [
    "# Baseline: plain MoE-ViT with 4 full-width (768-dim) experts, chosen so its\n",
    "# total parameter count roughly matches the 7-expert shared-parameter model above.\n",
    "print_parameters(ViTMoE(num_experts=4))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "从参数总量上来看两者基本一致（256.46M vs 256.43M），细微差异来自 gating 权重的维度不同（384×7 与 768×4）"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3. 寻找实现好的ViT\n",
    "待办"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4. 使用更复杂数据集\n",
    "\n",
    "拟使用 TinyImageNet：从 Huggingface 上下载了一份，将其包装成 VisionDataset\n",
    "\n",
    "下面尝试 Load 一下："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "from data import construct_dataset\n",
    "\n",
    "# NOTE(review): machine-specific absolute path — prefer a repo-relative path\n",
    "# such as \"../datasets/tiny-imagenet\" so the notebook survives Restart & Run All on other machines.\n",
    "tm_train,tm_test = construct_dataset(\"tiny-imagenet\", \"/home/ubuntu/ssk/mixture-of-experts/datasets/tiny-imagenet\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[[ 0.8104,  0.8276,  0.8276,  ..., -1.0048, -1.0048, -1.0048],\n",
       "          [ 0.8104,  0.8276,  0.8276,  ..., -1.0562, -1.0562, -1.0562],\n",
       "          [ 0.8104,  0.8276,  0.8276,  ..., -1.0562, -1.0562, -1.0562],\n",
       "          ...,\n",
       "          [ 1.4269,  1.4269,  1.4269,  ...,  2.0605,  2.0605,  2.0605],\n",
       "          [ 1.4269,  1.4269,  1.4269,  ...,  2.0605,  2.0605,  2.0605],\n",
       "          [ 1.4269,  1.4269,  1.4269,  ...,  2.0605,  2.0605,  2.0605]],\n",
       " \n",
       "         [[ 0.0476,  0.0476,  0.0476,  ..., -0.5126, -0.5476, -0.5476],\n",
       "          [ 0.0476,  0.0476,  0.0476,  ..., -0.5476, -0.5476, -0.5476],\n",
       "          [ 0.0476,  0.0476,  0.0476,  ..., -0.5476, -0.5476, -0.5476],\n",
       "          ...,\n",
       "          [ 1.1506,  1.1506,  1.1506,  ...,  1.2031,  1.2031,  1.2031],\n",
       "          [ 1.1506,  1.1506,  1.1506,  ...,  1.2031,  1.2031,  1.2031],\n",
       "          [ 1.1331,  1.1331,  1.1331,  ...,  1.2031,  1.2031,  1.2031]],\n",
       " \n",
       "         [[ 2.6400,  2.6400,  2.6400,  ..., -1.0201, -1.0201, -1.0201],\n",
       "          [ 2.6400,  2.6400,  2.6400,  ..., -1.0724, -1.0724, -1.0724],\n",
       "          [ 2.6400,  2.6400,  2.6400,  ..., -1.0724, -1.0724, -1.0724],\n",
       "          ...,\n",
       "          [ 0.7402,  0.7402,  0.7402,  ...,  1.3502,  1.3502,  1.3502],\n",
       "          [ 0.7228,  0.7228,  0.7228,  ...,  1.4200,  1.4200,  1.4200],\n",
       "          [ 0.7228,  0.7228,  0.7228,  ...,  1.4200,  1.4200,  1.4200]]]),\n",
       " 0)"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Peek at the first (image_tensor, label) sample to sanity-check the VisionDataset wrapper.\n",
    "tm_train[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# Load CIFAR10 through the same construct_dataset entry point, to confirm the\n",
    "# factory handles both dataset names after the refactor.\n",
    "cf_train,cf_test = construct_dataset(\"CIFAR10\",\"../datasets\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[[ 0.2624,  0.2796,  0.2796,  ...,  0.2796,  0.2967,  0.3138],\n",
       "          [ 0.2624,  0.2796,  0.2796,  ...,  0.2796,  0.2967,  0.3138],\n",
       "          [ 0.2624,  0.2796,  0.2796,  ...,  0.2796,  0.2967,  0.3138],\n",
       "          ...,\n",
       "          [-1.4500, -1.4500, -1.4500,  ..., -1.5870, -1.5870, -1.5870],\n",
       "          [-1.4500, -1.4500, -1.4500,  ..., -1.5870, -1.5870, -1.5870],\n",
       "          [-1.4500, -1.4500, -1.4500,  ..., -1.5870, -1.5870, -1.5870]],\n",
       " \n",
       "         [[-0.1450, -0.1450, -0.1275,  ..., -0.2150, -0.2150, -0.2150],\n",
       "          [-0.1450, -0.1450, -0.1099,  ..., -0.2150, -0.2150, -0.2150],\n",
       "          [-0.1275, -0.1275, -0.1099,  ..., -0.1975, -0.2150, -0.2150],\n",
       "          ...,\n",
       "          [-0.4601, -0.4601, -0.4601,  ..., -0.5651, -0.5651, -0.5651],\n",
       "          [-0.4601, -0.4601, -0.4601,  ..., -0.5651, -0.5651, -0.5651],\n",
       "          [-0.4601, -0.4601, -0.4601,  ..., -0.5651, -0.5476, -0.5476]],\n",
       " \n",
       "         [[-0.8458, -0.8633, -0.8633,  ..., -0.9330, -0.9330, -0.9330],\n",
       "          [-0.8458, -0.8633, -0.8633,  ..., -0.9330, -0.9330, -0.9330],\n",
       "          [-0.8284, -0.8458, -0.8458,  ..., -0.9330, -0.9330, -0.9330],\n",
       "          ...,\n",
       "          [ 0.6356,  0.6356,  0.6182,  ...,  0.4265,  0.4265,  0.4265],\n",
       "          [ 0.6356,  0.6356,  0.6182,  ...,  0.4265,  0.4265,  0.4265],\n",
       "          [ 0.6356,  0.6356,  0.6182,  ...,  0.4439,  0.4439,  0.4439]]]),\n",
       " 3)"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Peek at the first CIFAR10 test sample (normalized tensor, class index).\n",
    "cf_test[0]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 5.重构代码\n",
    "1. 增加CosineLRScheduler\n",
    "2. 分离配置文件\n",
    "3. 优化代码中不合理的地方"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "from args import args\n",
    "\n",
    "# NOTE(review): absolute config path is machine-specific; prefer a repo-relative\n",
    "# path like \"../configs/template.yaml\" for portability.\n",
    "config = args()\n",
    "config.from_yaml_file(\"/home/ubuntu/ssk/mixture-of-experts/configs/template.yaml\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'data_root': '/home/ubuntu/ssk/mixture-of-experts/datasets/tiny-imagenet',\n",
       " 'dataset_name': 'tiny-imagenet',\n",
       " 'batch_size': 256,\n",
       " 'moe_model': models.vit_moe.ViTMoE,\n",
       " 'model_kwargs': {'embed_dim': 256,\n",
       "  'depth': 6,\n",
       "  'num_experts': 7,\n",
       "  'num_classes': 200,\n",
       "  'num_heads': 8,\n",
       "  'share_dim': 128},\n",
       " 'loss_func': torch.nn.modules.loss.CrossEntropyLoss,\n",
       " 'lr': '4e-4',\n",
       " 'expert_loss_coeff': 1,\n",
       " 'weight_decay': 0.08,\n",
       " 'epoch': 1500,\n",
       " 'device': 'cuda:1'}"
      ]
     },
     "execution_count": 41,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Dump the loaded config as a dict to verify the YAML was parsed as expected\n",
    "# (note: 'lr' comes back as the string '4e-4' — presumably it should be a float; verify in args.from_yaml_file).\n",
    "config.to_dict()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
