{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch as t\n",
    "import torch.nn as nn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Two-layer MLP: 10 -> 5 -> 1 with a ReLU in between.\n",
    "# Indexable as net[0] (Linear), net[1] (ReLU), net[2] (Linear).\n",
    "net = nn.Sequential(\n",
    "    nn.Linear(10, 5),\n",
    "    nn.ReLU(),\n",
    "    nn.Linear(5, 1),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "input_data = t.randn(32,10) # batch size = 32\n",
    "\n",
    "# Call the module itself (net(...)) rather than net.forward(...):\n",
    "# __call__ also runs any registered forward/backward hooks, which a\n",
    "# direct .forward() call silently skips.\n",
    "output = net(input_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<AddmmBackward0 at 0x7fd8167e5fa0>"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# grad_fn is the autograd node that produced `output`;\n",
    "# AddmmBackward0 corresponds to the last Linear (matmul + bias add).\n",
    "output.grad_fn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reduce to a scalar so backward() needs no explicit gradient argument;\n",
    "# this populates .grad on every parameter with requires_grad=True.\n",
    "output.sum().backward()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "name: 0.weight, grad: tensor([[-4.1748e-01,  9.1356e-01, -1.4688e-01, -1.3940e+00, -3.4784e-01,\n",
      "         -9.0919e-01, -9.4736e-01,  7.1315e-01, -2.6506e-01, -3.8371e-01],\n",
      "        [ 9.9742e-02,  2.3168e-01,  2.6091e-02,  1.2247e-02, -8.9026e-02,\n",
      "         -4.6864e-02, -1.4436e-01,  1.9119e-02,  2.4017e-02,  3.3683e-02],\n",
      "        [-1.5635e+00, -3.7141e-01,  2.2817e-01, -1.1161e+00, -3.6120e-01,\n",
      "          1.0304e+00,  1.7790e-01, -1.6092e+00, -8.2331e-01, -1.1945e+00],\n",
      "        [-1.0297e-03, -1.3382e+00, -2.4232e+00,  5.2941e-01,  1.6868e+00,\n",
      "          2.5292e+00,  1.8308e+00, -1.3766e+00,  1.8023e+00,  4.1578e-02],\n",
      "        [-1.8486e+00,  2.9883e+00,  3.6755e+00,  7.7078e-01, -6.1005e-01,\n",
      "         -2.2305e-01, -8.6422e-01, -1.1172e+00, -3.3051e+00, -3.2235e-01]])\n",
      "name: 0.bias, grad: tensor([-1.8982, -0.2372, -3.4349,  3.9474, -6.8342])\n",
      "name: 2.weight, grad: tensor([[10.0838,  3.8045,  9.1054,  7.7497,  9.7475]])\n",
      "name: 2.bias, grad: tensor([32.])\n"
     ]
    }
   ],
   "source": [
    "# Inspect the gradients accumulated by backward()\n",
    "for pname, p in net.named_parameters():\n",
    "    print(\"name: {}, grad: {}\".format(pname, p.grad))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reset gradients. zero_grad(set_to_none=True) sets every param.grad\n",
    "# to None (rather than a zero tensor), which is exactly what the manual\n",
    "# loop over named_parameters() did, and saves memory.\n",
    "net.zero_grad(set_to_none=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "name: 0.weight, grad: None\n",
      "name: 0.bias, grad: None\n",
      "name: 2.weight, grad: None\n",
      "name: 2.bias, grad: None\n"
     ]
    }
   ],
   "source": [
    "# check gradient: every entry should now be None after the reset\n",
    "for param_name, param in net.named_parameters():\n",
    "    print(\"name: {}, grad: {}\".format(param_name, param.grad))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Freeze the first Linear layer: backward() will no longer accumulate\n",
    "# gradients for its weight and bias.\n",
    "for frozen_param in net[0].parameters():\n",
    "    frozen_param.requires_grad_(False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Forward + backward again; the frozen layer-0 params get no grads.\n",
    "# Use net(...) instead of net.forward(...) so module hooks still run.\n",
    "output = net(input_data)\n",
    "output.sum().backward()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "name: 0.weight, grad: None\n",
      "name: 0.bias, grad: None\n",
      "name: 2.weight, grad: tensor([[10.0838,  3.8045,  9.1054,  7.7497,  9.7475]])\n",
      "name: 2.bias, grad: tensor([32.])\n"
     ]
    }
   ],
   "source": [
    "# check gradient: frozen layer 0 stays None, layer 2 has grads\n",
    "for layer_param_name, layer_param in net.named_parameters():\n",
    "    print(\"name: {}, grad: {}\".format(layer_param_name, layer_param.grad))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A more general approach to disable gradient computation\n",
    "def disableGradient(module: nn.Module) -> None:\n",
    "    \"\"\"Freeze every parameter of `module` (recursively via\n",
    "    named_parameters) so backward() stops accumulating grads for them.\"\"\"\n",
    "    for pname, p in module.named_parameters():\n",
    "        print(\"disabling gradient computation of \" + pname)\n",
    "        p.requires_grad_(False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "disabling gradient computation of weight\n",
      "disabling gradient computation of bias\n"
     ]
    }
   ],
   "source": [
    "# Rebuild the network fresh (all params trainable again), then freeze\n",
    "# only the final Linear layer via the helper defined above.\n",
    "net=nn.Sequential(\n",
    "    nn.Linear(10,5),\n",
    "    nn.ReLU(),\n",
    "    nn.Linear(5,1),\n",
    ")\n",
    "disableGradient(net[2])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use net(...) instead of net.forward(...): calling the module goes\n",
    "# through __call__, which also runs any registered hooks.\n",
    "output = net(input_data)\n",
    "output.sum().backward()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "name: 0.weight, grad: tensor([[-0.0849, -4.3574, -1.5220,  2.0752, -1.1542,  1.5013,  1.3638, -1.9050,\n",
      "          4.0026, -0.5688],\n",
      "        [ 0.3826,  0.4420, -0.6834, -0.3267,  0.4703, -0.0209, -0.1593,  0.3566,\n",
      "         -0.0233, -0.6669],\n",
      "        [ 1.9095, -0.7768, -1.5918,  2.2522, -0.8887,  0.5377,  0.0776, -0.0323,\n",
      "          1.8929,  0.0581],\n",
      "        [-0.5992,  2.7724, -0.7288, -3.3561, -2.4699,  1.2476, -2.9248, -1.7598,\n",
      "         -3.9295, -2.8439],\n",
      "        [-3.5364, -1.5058,  2.8322, -2.8914, -1.1536,  0.7339, -0.0729, -3.0793,\n",
      "         -0.3644, -0.6493]])\n",
      "name: 0.bias, grad: tensor([ 4.1395,  0.8110,  3.1153, -6.7865, -4.7125])\n",
      "name: 2.weight, grad: None\n",
      "name: 2.bias, grad: None\n"
     ]
    }
   ],
   "source": [
    "# check gradient: now the frozen layer 2 is None, layer 0 has grads\n",
    "for n, p in net.named_parameters():\n",
    "    print(\"name: {}, grad: {}\".format(n, p.grad))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "name: cls_token, grad: None\n",
      "name: pos_embed, grad: None\n",
      "name: patch_embed.proj.weight, grad: None\n",
      "name: patch_embed.proj.bias, grad: None\n",
      "name: blocks.0.norm1.weight, grad: None\n",
      "name: blocks.0.norm1.bias, grad: None\n",
      "name: blocks.0.attn.qkv.weight, grad: None\n",
      "name: blocks.0.attn.qkv.bias, grad: None\n",
      "name: blocks.0.attn.proj.weight, grad: None\n",
      "name: blocks.0.attn.proj.bias, grad: None\n",
      "name: blocks.0.norm2.weight, grad: None\n",
      "name: blocks.0.norm2.bias, grad: None\n",
      "name: blocks.0.mlp.gate.w_gating, grad: None\n",
      "name: blocks.0.mlp.experts.w1, grad: None\n",
      "name: blocks.0.mlp.experts.w2, grad: None\n",
      "name: blocks.1.norm1.weight, grad: None\n",
      "name: blocks.1.norm1.bias, grad: None\n",
      "name: blocks.1.attn.qkv.weight, grad: None\n",
      "name: blocks.1.attn.qkv.bias, grad: None\n",
      "name: blocks.1.attn.proj.weight, grad: None\n",
      "name: blocks.1.attn.proj.bias, grad: None\n",
      "name: blocks.1.norm2.weight, grad: None\n",
      "name: blocks.1.norm2.bias, grad: None\n",
      "name: blocks.1.mlp.gate.w_gating, grad: None\n",
      "name: blocks.1.mlp.experts.w1, grad: None\n",
      "name: blocks.1.mlp.experts.w2, grad: None\n",
      "name: blocks.2.norm1.weight, grad: None\n",
      "name: blocks.2.norm1.bias, grad: None\n",
      "name: blocks.2.attn.qkv.weight, grad: None\n",
      "name: blocks.2.attn.qkv.bias, grad: None\n",
      "name: blocks.2.attn.proj.weight, grad: None\n",
      "name: blocks.2.attn.proj.bias, grad: None\n",
      "name: blocks.2.norm2.weight, grad: None\n",
      "name: blocks.2.norm2.bias, grad: None\n",
      "name: blocks.2.mlp.gate.w_gating, grad: None\n",
      "name: blocks.2.mlp.experts.w1, grad: None\n",
      "name: blocks.2.mlp.experts.w2, grad: None\n",
      "name: blocks.3.norm1.weight, grad: None\n",
      "name: blocks.3.norm1.bias, grad: None\n",
      "name: blocks.3.attn.qkv.weight, grad: None\n",
      "name: blocks.3.attn.qkv.bias, grad: None\n",
      "name: blocks.3.attn.proj.weight, grad: None\n",
      "name: blocks.3.attn.proj.bias, grad: None\n",
      "name: blocks.3.norm2.weight, grad: None\n",
      "name: blocks.3.norm2.bias, grad: None\n",
      "name: blocks.3.mlp.gate.w_gating, grad: None\n",
      "name: blocks.3.mlp.experts.w1, grad: None\n",
      "name: blocks.3.mlp.experts.w2, grad: None\n",
      "name: blocks.4.norm1.weight, grad: None\n",
      "name: blocks.4.norm1.bias, grad: None\n",
      "name: blocks.4.attn.qkv.weight, grad: None\n",
      "name: blocks.4.attn.qkv.bias, grad: None\n",
      "name: blocks.4.attn.proj.weight, grad: None\n",
      "name: blocks.4.attn.proj.bias, grad: None\n",
      "name: blocks.4.norm2.weight, grad: None\n",
      "name: blocks.4.norm2.bias, grad: None\n",
      "name: blocks.4.mlp.gate.w_gating, grad: None\n",
      "name: blocks.4.mlp.experts.w1, grad: None\n",
      "name: blocks.4.mlp.experts.w2, grad: None\n",
      "name: blocks.5.norm1.weight, grad: None\n",
      "name: blocks.5.norm1.bias, grad: None\n",
      "name: blocks.5.attn.qkv.weight, grad: None\n",
      "name: blocks.5.attn.qkv.bias, grad: None\n",
      "name: blocks.5.attn.proj.weight, grad: None\n",
      "name: blocks.5.attn.proj.bias, grad: None\n",
      "name: blocks.5.norm2.weight, grad: None\n",
      "name: blocks.5.norm2.bias, grad: None\n",
      "name: blocks.5.mlp.gate.w_gating, grad: None\n",
      "name: blocks.5.mlp.experts.w1, grad: None\n",
      "name: blocks.5.mlp.experts.w2, grad: None\n",
      "name: norm.weight, grad: None\n",
      "name: norm.bias, grad: None\n",
      "name: head.weight, grad: None\n",
      "name: head.bias, grad: None\n",
      "yes\n",
      "tensor([[-1.2608,  1.6330, -0.7980,  0.7869],\n",
      "        [-1.2831, -0.3052, -0.3544,  1.4484],\n",
      "        [ 0.1232, -0.6164,  0.3515,  1.5803],\n",
      "        ...,\n",
      "        [ 0.1749, -1.3812, -2.3535, -1.0489],\n",
      "        [-0.3743,  0.6283,  0.5580, -0.1640],\n",
      "        [-0.5767, -0.4258, -0.4698, -2.1158]])\n",
      "cls_toke2\n",
      "pos_embe2\n",
      "patch_embed.proj.weigh2\n",
      "patch_embed.proj.bia2\n",
      "blocks.0.norm1.weigh2\n",
      "blocks.0.norm1.bia2\n",
      "blocks.0.attn.qkv.weigh2\n",
      "blocks.0.attn.qkv.bia2\n",
      "blocks.0.attn.proj.weigh2\n",
      "blocks.0.attn.proj.bia2\n",
      "blocks.0.norm2.weigh2\n",
      "blocks.0.norm2.bia2\n",
      "blocks.0.mlp.gate.w_gatin2\n",
      "blocks.0.mlp.experts.w2\n",
      "blocks.0.mlp.experts.w2\n",
      "blocks.1.norm1.weigh2\n",
      "blocks.1.norm1.bia2\n",
      "blocks.1.attn.qkv.weigh2\n",
      "blocks.1.attn.qkv.bia2\n",
      "blocks.1.attn.proj.weigh2\n",
      "blocks.1.attn.proj.bia2\n",
      "blocks.1.norm2.weigh2\n",
      "blocks.1.norm2.bia2\n",
      "blocks.1.mlp.gate.w_gatin2\n",
      "blocks.1.mlp.experts.w2\n",
      "blocks.1.mlp.experts.w2\n",
      "blocks.2.norm1.weigh2\n",
      "blocks.2.norm1.bia2\n",
      "blocks.2.attn.qkv.weigh2\n",
      "blocks.2.attn.qkv.bia2\n",
      "blocks.2.attn.proj.weigh2\n",
      "blocks.2.attn.proj.bia2\n",
      "blocks.2.norm2.weigh2\n",
      "blocks.2.norm2.bia2\n",
      "blocks.2.mlp.gate.w_gatin2\n",
      "blocks.2.mlp.experts.w2\n",
      "blocks.2.mlp.experts.w2\n",
      "blocks.3.norm1.weigh2\n",
      "blocks.3.norm1.bia2\n",
      "blocks.3.attn.qkv.weigh2\n",
      "blocks.3.attn.qkv.bia2\n",
      "blocks.3.attn.proj.weigh2\n",
      "blocks.3.attn.proj.bia2\n",
      "blocks.3.norm2.weigh2\n",
      "blocks.3.norm2.bia2\n",
      "blocks.3.mlp.gate.w_gatin2\n",
      "blocks.3.mlp.experts.w2\n",
      "blocks.3.mlp.experts.w2\n",
      "blocks.4.norm1.weigh2\n",
      "blocks.4.norm1.bia2\n",
      "blocks.4.attn.qkv.weigh2\n",
      "blocks.4.attn.qkv.bia2\n",
      "blocks.4.attn.proj.weigh2\n",
      "blocks.4.attn.proj.bia2\n",
      "blocks.4.norm2.weigh2\n",
      "blocks.4.norm2.bia2\n",
      "blocks.4.mlp.gate.w_gatin2\n",
      "blocks.4.mlp.experts.w2\n",
      "blocks.4.mlp.experts.w2\n",
      "blocks.5.norm1.weigh2\n",
      "blocks.5.norm1.bia2\n",
      "blocks.5.attn.qkv.weigh2\n",
      "blocks.5.attn.qkv.bia2\n",
      "blocks.5.attn.proj.weigh2\n",
      "blocks.5.attn.proj.bia2\n",
      "blocks.5.norm2.weigh2\n",
      "blocks.5.norm2.bia2\n",
      "blocks.5.mlp.gate.w_gatin2\n",
      "blocks.5.mlp.experts.w2\n",
      "blocks.5.mlp.experts.w2\n",
      "norm.weigh2\n",
      "norm.bia2\n",
      "head.weigh2\n",
      "head.bia2\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "1.0"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import sys\n",
    "sys.path.append('..')\n",
    "from models.vit_moe import ViTMoE\n",
    "import numpy as np\n",
    "\n",
    "modell:nn.Module= ViTMoE(embed_dim=256, depth=6, num_experts=4, num_classes=10, num_heads=8)\n",
    "# All grads are None on a freshly constructed model.\n",
    "for name,param in modell.named_parameters():\n",
    "    print(\"name: {}, grad: {}\".format(name,param.grad))\n",
    "\n",
    "# Confirm the gating parameter exists under the expected name.\n",
    "a = 'blocks.0.mlp.gate.w_gating'\n",
    "for name,param in modell.named_parameters():\n",
    "    if a == name:\n",
    "        print('yes')\n",
    "\n",
    "# Snapshot the current parameter values. detach() alone returns a view\n",
    "# that SHARES storage with the live parameters, so any in-place update\n",
    "# of the model would silently change the 'snapshot' too (and the cosine\n",
    "# comparison below would always be 1.0). clone() makes an independent copy.\n",
    "tmp_model_para = {}\n",
    "for name,param in modell.named_parameters():\n",
    "    tmp_model_para[name] = param.detach().clone()\n",
    "print(tmp_model_para['blocks.0.mlp.gate.w_gating'])\n",
    "\n",
    "# NOTE(review): key[:-1] + '2' drops the LAST character of each name\n",
    "# (e.g. 'head.bias' -> 'head.bia2') — looks like debugging leftover;\n",
    "# confirm the intended transformation before relying on it.\n",
    "for key in tmp_model_para:\n",
    "    print(key[:-1] + '2')\n",
    "\n",
    "# Compare expert 0 of block 0 against the snapshot; identical tensors\n",
    "# give a mean cosine similarity of 1.0.\n",
    "cos = nn.CosineSimilarity(dim=0)\n",
    "cos(modell.state_dict()['blocks.0.mlp.experts.w1'][0],tmp_model_para['blocks.0.mlp.experts.w1'][0]).mean().item()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
