{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "1a7c43f6",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "from torch.nn import functional as F\n",
    "\n",
    "# Serialize a single tensor to disk with torch.save.\n",
    "x = torch.arange(4)\n",
    "torch.save(x, 'x-file')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "2a241c6b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0, 1, 2, 3])"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Deserialize the tensor from disk.\n",
    "# NOTE(review): torch.load unpickles the file -- only load files you\n",
    "# trust (recent PyTorch supports weights_only=True to restrict this).\n",
    "x2 = torch.load('x-file')\n",
    "x2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "53bff855",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([0, 1, 2, 3]), tensor([0., 0., 0., 0.]))"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# A list of tensors round-trips in a single save/load call;\n",
    "# torch.load returns the list, unpacked here into x2 and y2.\n",
    "y = torch.zeros(4)\n",
    "torch.save([x, y],'x-files')\n",
    "x2, y2 = torch.load('x-files')\n",
    "(x2, y2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "a5ca4dba",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'x': tensor([0, 1, 2, 3]), 'y': tensor([0., 0., 0., 0.])}"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# A dict mapping names to tensors works the same way -- convenient\n",
    "# when the saved tensors need labels.\n",
    "mydict = {'x': x, 'y': y}\n",
    "torch.save(mydict, 'mydict')\n",
    "mydict2 = torch.load('mydict')\n",
    "mydict2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "dcea69b1",
   "metadata": {},
   "outputs": [],
   "source": [
    "class MLP(nn.Module):\n",
    "    \"\"\"Two-layer MLP: Linear(20, 256) -> ReLU -> Linear(256, 10).\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.hidden = nn.Linear(20, 256)  # hidden layer\n",
    "        self.output = nn.Linear(256, 10)  # output layer\n",
    "\n",
    "    def forward(self, x):\n",
    "        # x must have 20 features in its last dimension to match self.hidden.\n",
    "        return self.output(F.relu(self.hidden(x)))\n",
    "\n",
    "# Instantiate the net and run a forward pass; Y is kept so the output\n",
    "# of a reloaded clone can be compared against it later.\n",
    "net = MLP()\n",
    "X = torch.randn(size=(2, 20))\n",
    "Y = net(X)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "2d927935",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "OrderedDict([('hidden.weight', tensor([[-0.0138,  0.2190,  0.1004,  ...,  0.2217, -0.0898,  0.0632],\n",
      "        [-0.1763, -0.1889,  0.0598,  ...,  0.0630, -0.1824,  0.1483],\n",
      "        [ 0.2161, -0.1546, -0.1833,  ..., -0.0160, -0.0202,  0.1946],\n",
      "        ...,\n",
      "        [ 0.0797, -0.2167, -0.0196,  ..., -0.1880, -0.1656, -0.2123],\n",
      "        [ 0.1190, -0.2204,  0.1914,  ..., -0.2212, -0.0187, -0.2074],\n",
      "        [ 0.1436, -0.1981,  0.1307,  ..., -0.0136, -0.1021, -0.1707]])), ('hidden.bias', tensor([ 0.0786, -0.1127, -0.0726, -0.1052, -0.0401,  0.1295, -0.0569, -0.1251,\n",
      "         0.0270,  0.2063, -0.1521,  0.0250,  0.0245,  0.1408,  0.1246,  0.0089,\n",
      "        -0.0703,  0.1703, -0.2049, -0.1471, -0.0925, -0.1817,  0.1110, -0.0560,\n",
      "        -0.2199,  0.1231, -0.1478, -0.2119, -0.1934, -0.0579,  0.0553,  0.1150,\n",
      "        -0.0262,  0.1668, -0.2235,  0.1604,  0.1575, -0.0879,  0.2166,  0.0795,\n",
      "         0.1748, -0.0010,  0.1358, -0.1939,  0.1334,  0.0457, -0.1682, -0.0026,\n",
      "         0.0664,  0.1823,  0.0189,  0.0243,  0.1660,  0.1021,  0.0447,  0.1730,\n",
      "        -0.0780, -0.1785,  0.0403, -0.0425, -0.0140,  0.0939, -0.2007,  0.0778,\n",
      "        -0.2014,  0.1055,  0.0033, -0.1646, -0.1323, -0.1532,  0.2072, -0.0426,\n",
      "        -0.0801,  0.0796, -0.0362, -0.1148, -0.0646, -0.0026,  0.1224,  0.2135,\n",
      "         0.0626,  0.1599,  0.0499,  0.0294, -0.0528,  0.2197, -0.1125,  0.1181,\n",
      "         0.0615,  0.1791, -0.1839,  0.1091,  0.0590,  0.1798, -0.1466, -0.1743,\n",
      "        -0.1635,  0.1584,  0.0770,  0.0150, -0.1877,  0.0215,  0.2178, -0.0260,\n",
      "         0.2050,  0.1058, -0.0528, -0.2224,  0.0571,  0.2074, -0.1086,  0.2067,\n",
      "         0.1416,  0.1770,  0.1499,  0.1592, -0.1266,  0.0022,  0.0849,  0.1521,\n",
      "        -0.1302,  0.0244, -0.1645,  0.1824, -0.0746,  0.0513, -0.0695,  0.0487,\n",
      "        -0.2086, -0.0675,  0.0504,  0.1678, -0.2218,  0.2019, -0.1676, -0.1791,\n",
      "         0.1333,  0.0405, -0.1238,  0.1462,  0.0205, -0.1640,  0.2097, -0.1290,\n",
      "        -0.0551,  0.1006, -0.1422,  0.0073,  0.0468, -0.1963, -0.1205,  0.0474,\n",
      "        -0.1283,  0.2206,  0.0151, -0.1041, -0.0822, -0.0853,  0.1024,  0.0465,\n",
      "        -0.2012, -0.1983,  0.2213,  0.0764,  0.2232,  0.0819, -0.0387, -0.0398,\n",
      "        -0.0231,  0.0879, -0.1078,  0.2008, -0.1331, -0.1013, -0.1259, -0.1275,\n",
      "        -0.2121,  0.0280,  0.1774,  0.1671, -0.1159,  0.0018,  0.1789,  0.1148,\n",
      "        -0.1033,  0.1821,  0.0781,  0.0636, -0.1366, -0.0659,  0.1806,  0.0453,\n",
      "         0.1657, -0.1825,  0.0123, -0.1602, -0.0038, -0.1963,  0.1596,  0.0938,\n",
      "         0.1930, -0.0045,  0.1303,  0.1232,  0.0488,  0.1131,  0.2062, -0.1517,\n",
      "         0.1784,  0.1352, -0.1060,  0.0591,  0.0268,  0.1355, -0.1742, -0.1034,\n",
      "         0.0223, -0.1036, -0.0872, -0.1891, -0.1908, -0.1012, -0.1425,  0.1132,\n",
      "         0.0984,  0.1243, -0.1454,  0.1709, -0.2135,  0.2007,  0.1639, -0.0592,\n",
      "        -0.0432,  0.0564, -0.1784, -0.1536, -0.0361,  0.1033, -0.1028,  0.1973,\n",
      "         0.1794,  0.0471, -0.0186,  0.0442,  0.1869, -0.0760,  0.1792, -0.0766,\n",
      "         0.1643, -0.1426,  0.0191, -0.1062, -0.0284, -0.1877, -0.0650,  0.0353])), ('output.weight', tensor([[ 0.0428, -0.0542, -0.0613,  ..., -0.0463, -0.0320,  0.0495],\n",
      "        [-0.0354, -0.0454,  0.0040,  ...,  0.0398,  0.0498,  0.0275],\n",
      "        [ 0.0472, -0.0163,  0.0006,  ...,  0.0516,  0.0428,  0.0591],\n",
      "        ...,\n",
      "        [-0.0596,  0.0107,  0.0512,  ..., -0.0191,  0.0533,  0.0257],\n",
      "        [-0.0515, -0.0332, -0.0480,  ..., -0.0063, -0.0182, -0.0315],\n",
      "        [-0.0451, -0.0299, -0.0021,  ...,  0.0437, -0.0285,  0.0295]])), ('output.bias', tensor([-0.0445, -0.0537, -0.0160,  0.0239,  0.0301, -0.0060, -0.0424, -0.0251,\n",
      "        -0.0589,  0.0414]))])\n"
     ]
    }
   ],
   "source": [
    "# state_dict() contains only the parameter tensors (weights/biases),\n",
    "# not the architecture; saving it persists the model's parameters.\n",
    "print(net.state_dict())\n",
    "torch.save(net.state_dict(), 'mlp.params')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "45f4d17e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "MLP(\n",
       "  (hidden): Linear(in_features=20, out_features=256, bias=True)\n",
       "  (output): Linear(in_features=256, out_features=10, bias=True)\n",
       ")"
      ]
     },
      "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Rebuild the architecture in code, then restore the saved parameters\n",
    "# into it.\n",
    "clone = MLP()\n",
    "clone.load_state_dict(torch.load('mlp.params'))\n",
    "clone.eval()  # inference mode (no-op here: MLP has no dropout/batchnorm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "0590c6ad",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "OrderedDict([('hidden.weight', tensor([[-0.0138,  0.2190,  0.1004,  ...,  0.2217, -0.0898,  0.0632],\n",
      "        [-0.1763, -0.1889,  0.0598,  ...,  0.0630, -0.1824,  0.1483],\n",
      "        [ 0.2161, -0.1546, -0.1833,  ..., -0.0160, -0.0202,  0.1946],\n",
      "        ...,\n",
      "        [ 0.0797, -0.2167, -0.0196,  ..., -0.1880, -0.1656, -0.2123],\n",
      "        [ 0.1190, -0.2204,  0.1914,  ..., -0.2212, -0.0187, -0.2074],\n",
      "        [ 0.1436, -0.1981,  0.1307,  ..., -0.0136, -0.1021, -0.1707]])), ('hidden.bias', tensor([ 0.0786, -0.1127, -0.0726, -0.1052, -0.0401,  0.1295, -0.0569, -0.1251,\n",
      "         0.0270,  0.2063, -0.1521,  0.0250,  0.0245,  0.1408,  0.1246,  0.0089,\n",
      "        -0.0703,  0.1703, -0.2049, -0.1471, -0.0925, -0.1817,  0.1110, -0.0560,\n",
      "        -0.2199,  0.1231, -0.1478, -0.2119, -0.1934, -0.0579,  0.0553,  0.1150,\n",
      "        -0.0262,  0.1668, -0.2235,  0.1604,  0.1575, -0.0879,  0.2166,  0.0795,\n",
      "         0.1748, -0.0010,  0.1358, -0.1939,  0.1334,  0.0457, -0.1682, -0.0026,\n",
      "         0.0664,  0.1823,  0.0189,  0.0243,  0.1660,  0.1021,  0.0447,  0.1730,\n",
      "        -0.0780, -0.1785,  0.0403, -0.0425, -0.0140,  0.0939, -0.2007,  0.0778,\n",
      "        -0.2014,  0.1055,  0.0033, -0.1646, -0.1323, -0.1532,  0.2072, -0.0426,\n",
      "        -0.0801,  0.0796, -0.0362, -0.1148, -0.0646, -0.0026,  0.1224,  0.2135,\n",
      "         0.0626,  0.1599,  0.0499,  0.0294, -0.0528,  0.2197, -0.1125,  0.1181,\n",
      "         0.0615,  0.1791, -0.1839,  0.1091,  0.0590,  0.1798, -0.1466, -0.1743,\n",
      "        -0.1635,  0.1584,  0.0770,  0.0150, -0.1877,  0.0215,  0.2178, -0.0260,\n",
      "         0.2050,  0.1058, -0.0528, -0.2224,  0.0571,  0.2074, -0.1086,  0.2067,\n",
      "         0.1416,  0.1770,  0.1499,  0.1592, -0.1266,  0.0022,  0.0849,  0.1521,\n",
      "        -0.1302,  0.0244, -0.1645,  0.1824, -0.0746,  0.0513, -0.0695,  0.0487,\n",
      "        -0.2086, -0.0675,  0.0504,  0.1678, -0.2218,  0.2019, -0.1676, -0.1791,\n",
      "         0.1333,  0.0405, -0.1238,  0.1462,  0.0205, -0.1640,  0.2097, -0.1290,\n",
      "        -0.0551,  0.1006, -0.1422,  0.0073,  0.0468, -0.1963, -0.1205,  0.0474,\n",
      "        -0.1283,  0.2206,  0.0151, -0.1041, -0.0822, -0.0853,  0.1024,  0.0465,\n",
      "        -0.2012, -0.1983,  0.2213,  0.0764,  0.2232,  0.0819, -0.0387, -0.0398,\n",
      "        -0.0231,  0.0879, -0.1078,  0.2008, -0.1331, -0.1013, -0.1259, -0.1275,\n",
      "        -0.2121,  0.0280,  0.1774,  0.1671, -0.1159,  0.0018,  0.1789,  0.1148,\n",
      "        -0.1033,  0.1821,  0.0781,  0.0636, -0.1366, -0.0659,  0.1806,  0.0453,\n",
      "         0.1657, -0.1825,  0.0123, -0.1602, -0.0038, -0.1963,  0.1596,  0.0938,\n",
      "         0.1930, -0.0045,  0.1303,  0.1232,  0.0488,  0.1131,  0.2062, -0.1517,\n",
      "         0.1784,  0.1352, -0.1060,  0.0591,  0.0268,  0.1355, -0.1742, -0.1034,\n",
      "         0.0223, -0.1036, -0.0872, -0.1891, -0.1908, -0.1012, -0.1425,  0.1132,\n",
      "         0.0984,  0.1243, -0.1454,  0.1709, -0.2135,  0.2007,  0.1639, -0.0592,\n",
      "        -0.0432,  0.0564, -0.1784, -0.1536, -0.0361,  0.1033, -0.1028,  0.1973,\n",
      "         0.1794,  0.0471, -0.0186,  0.0442,  0.1869, -0.0760,  0.1792, -0.0766,\n",
      "         0.1643, -0.1426,  0.0191, -0.1062, -0.0284, -0.1877, -0.0650,  0.0353])), ('output.weight', tensor([[ 0.0428, -0.0542, -0.0613,  ..., -0.0463, -0.0320,  0.0495],\n",
      "        [-0.0354, -0.0454,  0.0040,  ...,  0.0398,  0.0498,  0.0275],\n",
      "        [ 0.0472, -0.0163,  0.0006,  ...,  0.0516,  0.0428,  0.0591],\n",
      "        ...,\n",
      "        [-0.0596,  0.0107,  0.0512,  ..., -0.0191,  0.0533,  0.0257],\n",
      "        [-0.0515, -0.0332, -0.0480,  ..., -0.0063, -0.0182, -0.0315],\n",
      "        [-0.0451, -0.0299, -0.0021,  ...,  0.0437, -0.0285,  0.0295]])), ('output.bias', tensor([-0.0445, -0.0537, -0.0160,  0.0239,  0.0301, -0.0060, -0.0424, -0.0251,\n",
      "        -0.0589,  0.0414]))])\n"
     ]
    }
   ],
   "source": [
    "print(clone.state_dict())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "e54ac732",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[True, True, True, True, True, True, True, True, True, True],\n",
       "        [True, True, True, True, True, True, True, True, True, True]])"
      ]
     },
      "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# With identical parameters and a deterministic forward pass, the\n",
    "# clone must reproduce net's output exactly on the same input.\n",
    "Y_clone = clone(X)\n",
    "Y_clone == Y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "32b19c4d",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
