{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "\n",
    "max_seq_len = 10\n",
    "\n",
    "mask = torch.full((1, 1, max_seq_len, max_seq_len), fill_value=float('-inf'))\n",
    "mask.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "mask2 = torch.triu(mask, 12)\n",
    "mask2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# batch_size, seq_len, n_heads, head_dim // 2, 1\n",
    "xq_out_r = torch.randn(1, 1, 10, 2, 1)\n",
    "xq_out_i = torch.randn(1, 1, 10, 2, 1)\n",
    "a= torch.stack([xq_out_r, xq_out_i], dim=-1)\n",
    "shape1 = a.shape\n",
    "a = a.flatten(3)\n",
    "shape2 = a.shape\n",
    "print(shape1, shape2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "batch_size = 1\n",
    "max_seq_len = 2\n",
    "n_kv_heads = 2\n",
    "head_dim = 2\n",
    "n_rep = 2\n",
    "x = torch.rand(batch_size, max_seq_len, n_kv_heads, head_dim).transpose(1, 2)\n",
    "y = torch.rand(batch_size, max_seq_len, n_kv_heads, head_dim).transpose(1, 2)\n",
    "scores = torch.matmul(x, y.transpose(2, 3))\n",
    "print(x.shape)\n",
    "print(y.shape)\n",
    "\n",
    "scores.shape\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "class ModelArgs:\n",
    "    def __init__(self, dim, eps, dropout):\n",
    "        self.dim = dim\n",
    "        self.eps = eps\n",
    "        self.dropout = dropout\n",
    "        \n",
    "\n",
    "class RMSNorm(nn.Module):\n",
    "    def __init__(self, dim : int, eps : float):\n",
    "        super().__init__()\n",
    "        self.eps = eps\n",
    "        self.weight = nn.Parameter(torch.ones(dim))\n",
    "        \n",
    "    def _norm(self, x : torch.Tensor):\n",
    "        return x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)\n",
    "    \n",
    "    def forward(self, x : torch.Tensor):\n",
    "        output = self._norm(x.float().type_as(x))\n",
    "        return output.type_as(x) * self.weight.type_as(x)\n",
    "\n",
    "\n",
    "class SimpleNeuralNetwork(nn.Module):\n",
    "    def __init__(self, input_dim, output_dim, normalization_eps=1e-6):\n",
    "        super(SimpleNeuralNetwork, self).__init__()\n",
    "        self.linear = nn.Linear(input_dim, output_dim)  \n",
    "        self.norm = RMSNorm(output_dim, normalization_eps)  \n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.linear(x)  \n",
    "        x = self.norm(x)  \n",
    "        return x\n",
    "\n",
    "\n",
    "input_dim = 10\n",
    "output_dim = 10\n",
    "model = SimpleNeuralNetwork(input_dim, output_dim)\n",
    "\n",
    "\n",
    "input_tensor = torch.randn(5, input_dim)  \n",
    "\n",
    "output_tensor = model(input_tensor)\n",
    "\n",
    "print(\"Output Tensor:\")\n",
    "print(output_tensor.shape)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "dim = 24\n",
    "theta : float = 10000.0\n",
    "freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))\n",
    "freqs.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "end = seq_len = 10\n",
    "t = torch.arange(end, device=freqs.device)\n",
    "t.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "freqs = torch.outer(t, freqs)\n",
    "freqs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "from dataclasses import dataclass\n",
    "from typing import Any, Optional, Tuple\n",
    "\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "import torch.nn as nn\n",
    "\n",
    "@dataclass\n",
    "class ModelArgs:\n",
    "    # llama 7B model hyperparameters\n",
    "    dim : int = 4096\n",
    "    n_layers : int = 32\n",
    "    n_heads : int = 32\n",
    "    n_kv_heads : Optional[int] = None\n",
    "    vocab_size : int = 32000\n",
    "    hidden_dim : Optional[int] = None\n",
    "    # MLP hidden layer size will be multiple of \n",
    "    multiple_of : int = 256\n",
    "    norm_eps : float = 1e-5\n",
    "    max_seq_len : int = 2048\n",
    "    dropout : float = 0.0\n",
    "    \n",
    "def precompute_freqs_cis(dim : int, end : int, theta : float = 10000.0):\n",
    "    freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))\n",
    "    t = torch.arange(end, device=freqs.device)\n",
    "    freqs = torch.outer(t, freqs).float()\n",
    "    freqs_cos = torch.cos(freqs)\n",
    "    freqs_sin = torch.sin(freqs)\n",
    "    return freqs_cos, freqs_sin\n",
    "    # return shape : max_seq_len, dim // 2\n",
    "    \n",
    "def repeat_kv(\n",
    "    x : torch.Tensor,\n",
    "    n_rep : int\n",
    ") -> torch.Tensor:\n",
    "    batch_size, seq_len, n_kv_heads, head_dim = x.shape\n",
    "    if n_rep == 1:\n",
    "        return x\n",
    "    return (\n",
    "        x[:, :, :, None, :]\n",
    "        .expand(batch_size, seq_len, n_kv_heads, n_rep, head_dim)\n",
    "        .reshape(batch_size, seq_len, n_kv_heads * n_rep, head_dim)\n",
    "    )\n",
    "    \n",
    "def reshape_for_broadcasting(\n",
    "    freqs_cis : torch.Tensor, \n",
    "    x : torch.Tensor) -> torch.Tensor:\n",
    "    # freqs_cis : original freqs_sin or freqs_cos tensor\n",
    "    # x : xq_r or xq_i tensor shape is (batch_size, seq_len, n_heads, head_dim // 2, 1)\n",
    "    ndim = x.ndim\n",
    "    assert 0 <= 1 < ndim\n",
    "    # freqs_cis shape : seq_len, 1\n",
    "    # x shape : 1, 8, 32, 64\n",
    "    # freqs_cis shape : 8, 64\n",
    "    assert freqs_cis.shape == (x.shape[1], x.shape[-1])\n",
    "    shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]\n",
    "    # shape freqs_cis from (seq_len, 1) to (1, seq_len, 1, 1, 1)\n",
    "    return freqs_cis.view(shape)\n",
    "    \n",
    "def apply_rotary_pos_emb(\n",
    "    xq : torch.Tensor,\n",
    "    xk : torch.Tensor,\n",
    "    freqs_cos : torch.Tensor,\n",
    "    freqs_sin : torch.Tensor\n",
    ") -> Tuple[torch.Tensor, torch.Tensor]:\n",
    "    # input xq shape : batch_size, seq_len, n_heads, head_dim\n",
    "    # xq shape : 1, 8, 32, 128\n",
    "    # xk shape : 1, 8, 8, 128\n",
    "    \n",
    "    # reshape xq, xk to match the complex representation\n",
    "    xq_r, xq_i = xq.float().reshape(xq.shape[:-1] + (-1, 2)).unbind(-1)\n",
    "    # xq_r, xq_i shape : 1, 8, 32, 64\n",
    "    xk_r, xk_i = xk.float().reshape(xk.shape[:-1] + (-1, 2)).unbind(-1)\n",
    "    # xk_r, xk_i shape : 1, 8, 8, 64\n",
    "    \n",
    "    # xq_r shape : batch_size, seq_len, n_heads, head_dim // 2, 1\n",
    "    # xq_i shape : batch_size, seq_len, n_heads, head_dim // 2, 1\n",
    "    # reshape freqs_cos, freqs_sin for broadcasting\n",
    "    freqs_cos = reshape_for_broadcasting(freqs_cos, xq_r)\n",
    "    # input freqs_sin(cos) shape : (seq_len, 1)\n",
    "    # reshaped freqs_sin(cos) shape : (1, seq_len, 1, 1, 1)\n",
    "    freqs_sin = reshape_for_broadcasting(freqs_sin, xq_i)\n",
    "    \n",
    "    # apply rotary positional embedding\n",
    "    xq_out_r = xq_r * freqs_cos - xq_i * freqs_sin\n",
    "    xq_out_i = xq_r * freqs_sin + xq_i * freqs_cos\n",
    "    # shape : batch_size, seq_len, n_heads, head_dim // 2, 1\n",
    "    xk_out_r = xk_r * freqs_cos - xk_i * freqs_sin\n",
    "    xk_out_i = xk_r * freqs_sin + xk_i * freqs_cos\n",
    "    \n",
    "    # flatten last two dimension\n",
    "    # first stack the last two dimension of xq_out_r, xq_out_i\n",
    "    # stacked shape : batch_size, seq_len, n_heads, head_dim // 2, 1, 2\n",
    "    # keep first three dimension and flatten to the last\n",
    "    # flattened shape : batch_size, seq_len, n_heads, head_dim\n",
    "    xq_out = torch.stack([xq_out_r, xq_out_i], dim = -1).flatten(3)\n",
    "    xk_out = torch.stack([xk_out_r, xk_out_i], dim = -1).flatten(3)\n",
    "    \n",
    "    return xq_out.type_as(xq), xk_out.type_as(xk)\n",
    "    # return shape : batch_size, seq_len, n_heads, head_dim\n",
    "    \n",
    "class Attention(nn.Module):\n",
    "    def __init__(self, args : ModelArgs):\n",
    "        super().__init__()\n",
    "        self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads\n",
    "        assert args.n_heads % self.n_kv_heads == 0\n",
    "        model_parallel_size = 1\n",
    "        self.n_local_heads = args.n_heads // model_parallel_size\n",
    "        self.n_local_kv_heads = self.n_kv_heads // model_parallel_size\n",
    "        self.n_rep = self.n_local_heads // self.n_local_kv_heads\n",
    "        self.head_dim = args.dim // args.n_heads\n",
    "        self.wq = nn.Linear(args.dim, args.n_heads * self.head_dim, bias=False)\n",
    "        self.wk = nn.Linear(args.dim, args.n_kv_heads * self.head_dim, bias=False)\n",
    "        self.wv = nn.Linear(args.dim, args.n_kv_heads * self.head_dim, bias=False)\n",
    "        self.wo = nn.Linear(args.n_heads * self.head_dim, args.dim, bias=False)\n",
    "        self.atten_dropout = nn.Dropout(p = args.dropout)\n",
    "        self.residual_dropout = nn.Dropout(p = args.dropout)\n",
    "        self.dropout = args.dropout\n",
    "        \n",
    "        # use flash attention or manual implementation\n",
    "        self.flash = hasattr(torch.nn.functional, \"scaled_dot_product_attention\")\n",
    "        if not self.flash:\n",
    "            print(\"Using slow attention implementation. Flash Attention requires Pytorch >= 2.0\")\n",
    "            mask = torch.full((1, 1, args.max_seq_len, args.max_seq_len), float('-inf'))\n",
    "            mask = torch.triu(mask, diagonal=1)\n",
    "            self.register_buffer('mask', mask)\n",
    "            \n",
    "    def forward(\n",
    "        self,\n",
    "        x : torch.Tensor,\n",
    "        freqs_cos : torch.Tensor,\n",
    "        freqs_sin : torch.Tensor\n",
    "    ):\n",
    "        # input shape : batch_size, seq_len, args.dim\n",
    "        # x shape : 1, 8, 4096\n",
    "        batch_size, seq_len, _ = x.shape\n",
    "        \n",
    "        #QKV\n",
    "        xq : torch.Tensor = self.wq(x)\n",
    "        # xq shape : 1, 8, 4096\n",
    "        xk : torch.Tensor = self.wk(x)\n",
    "        # xk shape : 1, 8, 1024\n",
    "        xv : torch.Tensor = self.wv(x)\n",
    "        # xv.shape : 1, 8, 1024\n",
    "        \n",
    "        # split QKV into heads\n",
    "        xq = xq.view(batch_size, seq_len, self.n_local_heads, self.head_dim)\n",
    "        # xq shape : 1, 8, 32, 128\n",
    "        xk = xk.view(batch_size, seq_len, self.n_kv_heads, self.head_dim)\n",
    "        # xk shape : 1, 8, 8, 128\n",
    "        xv = xv.view(batch_size, seq_len, self.n_kv_heads, self.head_dim)\n",
    "        # xv.shape : 1, 8, 8, 128\n",
    "        \n",
    "        # xq, xk shape : batch_size, seq_len, n_heads, head_dim\n",
    "        # RoPE relative positional embedding\n",
    "        xq, xk = apply_rotary_pos_emb(xq, xk, freqs_cos, freqs_sin)\n",
    "        \n",
    "        # grouped multiquery attention: expand out keys and values\n",
    "        xk = repeat_kv(xk, self.n_rep) # (batch_size, seq_len, n_local_heads * n_rep, head_dim)\n",
    "        xv = repeat_kv(xv, self.n_rep)\n",
    "        \n",
    "        # transpose\n",
    "        xq = xq.transpose(1, 2) # (batch_size, n_local_heads, seq_len, head_dim)\n",
    "        xk = xk.transpose(1, 2) # (batch_size, n_local_kv_heads * n_rep, seq_len, head_dim)\n",
    "        xv = xv.transpose(1, 2) # (batch_size, n_local_kv_heads * n_rep, seq_len, head_dim)\n",
    "        \n",
    "        # flash attention\n",
    "        if self.flash:\n",
    "            output = torch.nn.functional.scaled_dot_product_attention(xq, xk, xv, attn_mask=None, dropout_p=self.dropout if self.training else 0.0, is_causal=True)\n",
    "        else:\n",
    "            # manual implementation attention\n",
    "            scores = torch.matmul(xq, xk.transpose(2, 3)) / math.sqrt(self.head_dim)\n",
    "            # scores.shape : batch_size, n_local_heads, seq_len, seq_len\n",
    "            assert hasattr(self, \"mask\")\n",
    "            # mask : 1, 1, seq_len, seq_len and diagonal is 0 and upper triangle is -inf\n",
    "            scores = scores + self.mask[:, :, :seq_len, :seq_len]\n",
    "            scores = F.softmax(scores.float(), dim = -1).type_as(xq)\n",
    "            scores = self.atten_dropout(scores)\n",
    "            output = torch.matmul(scores, xv)\n",
    "            # output shape is batch_size, n_local_heads, seq_len, head_dim\n",
    "            \n",
    "        # restore time as batch dimension and concat heads\n",
    "        output = output.transpose(1, 2).contiguous().view(batch_size, seq_len, -1)\n",
    "        \n",
    "        # final projection into the residual stream\n",
    "        output = self.wo(output)\n",
    "        # output shape : batch_size, seq_len, args.dim\n",
    "        output = self.residual_dropout(output)\n",
    "        return output\n",
    "       \n",
    "class FeedForward(nn.Module):\n",
    "    def __init__(self, dim : int, hidden_dim : int, multiple_of : int, dropout : float):\n",
    "        super().__init__()\n",
    "        if hidden_dim is None:\n",
    "            hidden_dim = 4 * dim\n",
    "            hidden_dim = int(2 * hidden_dim / 3)\n",
    "            hidden_dim = hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)\n",
    "        self.w1 = nn.Linear(dim, hidden_dim, bias = False)\n",
    "        self.w2 = nn.Linear(hidden_dim, dim, bias = False)\n",
    "        self.w3 = nn.Linear(dim, hidden_dim, bias = False)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        \n",
    "    def forward(self, x : torch.Tensor):\n",
    "        return self.dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))\n",
    "\n",
    "class RMSNorm(nn.Module):\n",
    "    def __init__(self, dim : int, eps : float):\n",
    "        super().__init__()\n",
    "        self.eps = eps\n",
    "        self.weight = nn.Parameter(torch.ones(dim))\n",
    "        \n",
    "    def _norm(self, x : torch.Tensor):\n",
    "        return x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)\n",
    "    \n",
    "    def forward(self, x : torch.Tensor):\n",
    "        # x shape : batch_size, seq_len, dim : 1, 8, 4096\n",
    "        output = self._norm(x.float().type_as(x))\n",
    "        return output.type_as(x) * self.weight.type_as(x)\n",
    "    \n",
    "class TransformerBlock(nn.Module):\n",
    "    def __init__(self, layer_idx : int, args : ModelArgs):\n",
    "        super().__init__()\n",
    "        self.n_heads = args.n_heads\n",
    "        self.dim = args.dim\n",
    "        self.head_dim = args.dim // args.n_heads\n",
    "        self.attention = Attention(args)\n",
    "        self.feed_forward = FeedForward(\n",
    "            dim = args.dim,\n",
    "            hidden_dim = args.hidden_dim,\n",
    "            multiple_of = args.multiple_of,\n",
    "            dropout = args.dropout\n",
    "        )\n",
    "        self.layer_id = layer_idx\n",
    "        self.attention_norm = RMSNorm(args.dim, eps = args.norm_eps)\n",
    "        self.ffn_norm = RMSNorm(args.dim, eps = args.norm_eps)\n",
    "        \n",
    "    def forward(\n",
    "        self,\n",
    "        x : torch.Tensor,\n",
    "        freqs_cos : torch.Tensor,\n",
    "        freqs_sin : torch.Tensor\n",
    "    ):\n",
    "        # input before norm\n",
    "        h = x + self.attention.forward(self.attention_norm.forward(x), freqs_cos, freqs_sin)\n",
    "        out = h + self.feed_forward.forward(self.ffn_norm.forward(h))\n",
    "        return out\n",
    "         \n",
    "class Transformer(nn.Module):\n",
    "    last_loss : Optional[torch.Tensor]\n",
    "    \n",
    "    def __init__(self, params : ModelArgs):\n",
    "        super().__init__()\n",
    "        self.params = params\n",
    "        self.vocab_size = params.vocab_size\n",
    "        self.n_layers = params.n_layers\n",
    "        \n",
    "        self.a = nn.Linear\n",
    "        self.token_embedding = nn.Embedding(params.vocab_size, params.dim)\n",
    "        self.dropout = nn.Dropout(p = params.dropout)\n",
    "        self.layers = nn.ModuleList()\n",
    "        for layer_idx in range(params.n_layers):\n",
    "            self.layers.append(TransformerBlock(layer_idx, params))\n",
    "        self.norm = RMSNorm(params.dim, eps=params.norm_eps)\n",
    "        self.output  = nn.Linear(params.dim, params.vocab_size, bias=False)\n",
    "        \n",
    "        # share the unembedding parameters with the embedding layer\n",
    "        self.token_embedding.weight = self.output.weight\n",
    "        \n",
    "        # some useful precompute for the RoPE relative positional embeddings\n",
    "        freqs_cos, freqs_sin = precompute_freqs_cis(self.params.dim // self.params.n_heads, self.params.max_seq_len)\n",
    "        # freq_cos shape : self.params.dim // self.params.n_heads // 2, self.params.max_seq_len\n",
    "        # freq_sin shape : self.params.dim // self.params.n_heads // 2, self.params.max_seq_len\n",
    "        self.register_buffer('freqs_cos', freqs_cos)\n",
    "        self.register_buffer('freqs_sin', freqs_sin)\n",
    "        \n",
    "        # init all weights\n",
    "        self.apply(self._init_weights)\n",
    "        \n",
    "        # apply special scaled init to the residual projection\n",
    "        for pn, p in self.named_parameters():\n",
    "            if pn.endswith(\"w3.weight\") or pn.endswith(\"wo.weight\"):\n",
    "                torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * params.n_layers))\n",
    "        \n",
    "        # Initialize attribute for the loss of the last forward call.\n",
    "        self.last_loss = None\n",
    "        \n",
    "    def _init_weights(self, module):\n",
    "        if isinstance(module, nn.Linear):\n",
    "            torch.nn.init.normal_(module.weight, mean = 0.0, std=0.02)\n",
    "            if module.bias is not None:\n",
    "                torch.nn.init.zeros_(module.bias)\n",
    "            elif isinstance(module, nn.Embedding):\n",
    "                torch.nn.init.normal_(module.weight, mean = 0.0, std=0.02)\n",
    "                \n",
    "        \n",
    "    def forward(self, tokens : torch.Tensor, targets : Optional[torch.Tensor] = None) -> torch.Tensor:\n",
    "        batch_size, seq_len = tokens.shape\n",
    "        # input shape : (1, 8)\n",
    "        h = self.token_embedding(tokens)\n",
    "        # h shape : [1, 8, 4094] batch_size, seq_len, dim\n",
    "        h = self.dropout(h)\n",
    "            \n",
    "        # freqs_cos, freqs_sin shape : [2048, 64]\n",
    "        freqs_cos = self.freqs_cos[:seq_len]\n",
    "        # freqs_cos shape : [8 , 64]\n",
    "        freqs_sin = self.freqs_sin[:seq_len]\n",
    "            \n",
    "        for layer in self.layers:\n",
    "            h = layer.forward(h, freqs_cos, freqs_sin)\n",
    "        h = self.norm.forward(h)\n",
    "            \n",
    "        if targets is not None:\n",
    "            # if we are given some desired targets also calculate the loss\n",
    "            logits = self.output(h)\n",
    "            # logits shape : batch_size, seq_len, vocab_size\n",
    "            # logits.view(-1, logits.size(-1)) -> shape : batch_size * seq_len, vocab_size\n",
    "            self.last_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)\n",
    "        else:\n",
    "            # using list [-1] to preserve the time dim\n",
    "            logits = self.output(h[:, [-1], :])\n",
    "            \n",
    "        return logits\n",
    "        \n",
    "    @torch.inference_mode()\n",
    "    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):\n",
    "        for _ in range(max_new_tokens):\n",
    "            # if the sequence context is growing too long, we must crop it at block size\n",
    "            idx_cond = idx if idx.size(1) <= self.params.max_seq_len else idx[:, -self.params.max_seq_len:]\n",
    "            # forward the model to get the logits for the index in the sequence\n",
    "            logits = self.forward(idx_cond)\n",
    "            # crop to get the final time step\n",
    "            logits = logits[:, -1, :]\n",
    "            if temperature == 0.0:\n",
    "                _, idx_next = torch.topk(logits, k = 1, dim = -1)\n",
    "            else:\n",
    "                # pluck the logits at the final step and scale by desired temperature\n",
    "                logits = logits / temperature\n",
    "                if top_k is not None:\n",
    "                    value, _ = torch.topk(logits, min(top_k, logits.size(-1)))\n",
    "                    logits[logits < value[: [-1]]] = -float('Inf')\n",
    "                probs = F.softmax(logits, dim = -1)\n",
    "                idx_next = torch.multinomial(probs, num_samples=1)\n",
    "            # append the new token to the sequence\n",
    "            idx = torch.cat([idx, idx_next], dim = 1)\n",
    "        \n",
    "        return idx\n",
    " \n",
    " \n",
    "args = ModelArgs(\n",
    "    n_kv_heads=8,\n",
    "    hidden_dim=1024,\n",
    ")\n",
    "\n",
    "model = Transformer(args).to('cuda')         "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "state_d = model.state_dict()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "freqs_cos\n",
      "freqs_sin\n",
      "token_embedding.weight\n",
      "layers.0.attention.wq.weight\n",
      "layers.0.attention.wk.weight\n",
      "layers.0.attention.wv.weight\n",
      "layers.0.attention.wo.weight\n",
      "layers.0.feed_forward.w1.weight\n",
      "layers.0.feed_forward.w2.weight\n",
      "layers.0.feed_forward.w3.weight\n",
      "layers.0.attention_norm.weight\n",
      "layers.0.ffn_norm.weight\n",
      "layers.1.attention.wq.weight\n",
      "layers.1.attention.wk.weight\n",
      "layers.1.attention.wv.weight\n",
      "layers.1.attention.wo.weight\n",
      "layers.1.feed_forward.w1.weight\n",
      "layers.1.feed_forward.w2.weight\n",
      "layers.1.feed_forward.w3.weight\n",
      "layers.1.attention_norm.weight\n",
      "layers.1.ffn_norm.weight\n",
      "layers.2.attention.wq.weight\n",
      "layers.2.attention.wk.weight\n",
      "layers.2.attention.wv.weight\n",
      "layers.2.attention.wo.weight\n",
      "layers.2.feed_forward.w1.weight\n",
      "layers.2.feed_forward.w2.weight\n",
      "layers.2.feed_forward.w3.weight\n",
      "layers.2.attention_norm.weight\n",
      "layers.2.ffn_norm.weight\n",
      "layers.3.attention.wq.weight\n",
      "layers.3.attention.wk.weight\n",
      "layers.3.attention.wv.weight\n",
      "layers.3.attention.wo.weight\n",
      "layers.3.feed_forward.w1.weight\n",
      "layers.3.feed_forward.w2.weight\n",
      "layers.3.feed_forward.w3.weight\n",
      "layers.3.attention_norm.weight\n",
      "layers.3.ffn_norm.weight\n",
      "layers.4.attention.wq.weight\n",
      "layers.4.attention.wk.weight\n",
      "layers.4.attention.wv.weight\n",
      "layers.4.attention.wo.weight\n",
      "layers.4.feed_forward.w1.weight\n",
      "layers.4.feed_forward.w2.weight\n",
      "layers.4.feed_forward.w3.weight\n",
      "layers.4.attention_norm.weight\n",
      "layers.4.ffn_norm.weight\n",
      "layers.5.attention.wq.weight\n",
      "layers.5.attention.wk.weight\n",
      "layers.5.attention.wv.weight\n",
      "layers.5.attention.wo.weight\n",
      "layers.5.feed_forward.w1.weight\n",
      "layers.5.feed_forward.w2.weight\n",
      "layers.5.feed_forward.w3.weight\n",
      "layers.5.attention_norm.weight\n",
      "layers.5.ffn_norm.weight\n",
      "layers.6.attention.wq.weight\n",
      "layers.6.attention.wk.weight\n",
      "layers.6.attention.wv.weight\n",
      "layers.6.attention.wo.weight\n",
      "layers.6.feed_forward.w1.weight\n",
      "layers.6.feed_forward.w2.weight\n",
      "layers.6.feed_forward.w3.weight\n",
      "layers.6.attention_norm.weight\n",
      "layers.6.ffn_norm.weight\n",
      "layers.7.attention.wq.weight\n",
      "layers.7.attention.wk.weight\n",
      "layers.7.attention.wv.weight\n",
      "layers.7.attention.wo.weight\n",
      "layers.7.feed_forward.w1.weight\n",
      "layers.7.feed_forward.w2.weight\n",
      "layers.7.feed_forward.w3.weight\n",
      "layers.7.attention_norm.weight\n",
      "layers.7.ffn_norm.weight\n",
      "layers.8.attention.wq.weight\n",
      "layers.8.attention.wk.weight\n",
      "layers.8.attention.wv.weight\n",
      "layers.8.attention.wo.weight\n",
      "layers.8.feed_forward.w1.weight\n",
      "layers.8.feed_forward.w2.weight\n",
      "layers.8.feed_forward.w3.weight\n",
      "layers.8.attention_norm.weight\n",
      "layers.8.ffn_norm.weight\n",
      "layers.9.attention.wq.weight\n",
      "layers.9.attention.wk.weight\n",
      "layers.9.attention.wv.weight\n",
      "layers.9.attention.wo.weight\n",
      "layers.9.feed_forward.w1.weight\n",
      "layers.9.feed_forward.w2.weight\n",
      "layers.9.feed_forward.w3.weight\n",
      "layers.9.attention_norm.weight\n",
      "layers.9.ffn_norm.weight\n",
      "layers.10.attention.wq.weight\n",
      "layers.10.attention.wk.weight\n",
      "layers.10.attention.wv.weight\n",
      "layers.10.attention.wo.weight\n",
      "layers.10.feed_forward.w1.weight\n",
      "layers.10.feed_forward.w2.weight\n",
      "layers.10.feed_forward.w3.weight\n",
      "layers.10.attention_norm.weight\n",
      "layers.10.ffn_norm.weight\n",
      "layers.11.attention.wq.weight\n",
      "layers.11.attention.wk.weight\n",
      "layers.11.attention.wv.weight\n",
      "layers.11.attention.wo.weight\n",
      "layers.11.feed_forward.w1.weight\n",
      "layers.11.feed_forward.w2.weight\n",
      "layers.11.feed_forward.w3.weight\n",
      "layers.11.attention_norm.weight\n",
      "layers.11.ffn_norm.weight\n",
      "layers.12.attention.wq.weight\n",
      "layers.12.attention.wk.weight\n",
      "layers.12.attention.wv.weight\n",
      "layers.12.attention.wo.weight\n",
      "layers.12.feed_forward.w1.weight\n",
      "layers.12.feed_forward.w2.weight\n",
      "layers.12.feed_forward.w3.weight\n",
      "layers.12.attention_norm.weight\n",
      "layers.12.ffn_norm.weight\n",
      "layers.13.attention.wq.weight\n",
      "layers.13.attention.wk.weight\n",
      "layers.13.attention.wv.weight\n",
      "layers.13.attention.wo.weight\n",
      "layers.13.feed_forward.w1.weight\n",
      "layers.13.feed_forward.w2.weight\n",
      "layers.13.feed_forward.w3.weight\n",
      "layers.13.attention_norm.weight\n",
      "layers.13.ffn_norm.weight\n",
      "layers.14.attention.wq.weight\n",
      "layers.14.attention.wk.weight\n",
      "layers.14.attention.wv.weight\n",
      "layers.14.attention.wo.weight\n",
      "layers.14.feed_forward.w1.weight\n",
      "layers.14.feed_forward.w2.weight\n",
      "layers.14.feed_forward.w3.weight\n",
      "layers.14.attention_norm.weight\n",
      "layers.14.ffn_norm.weight\n",
      "layers.15.attention.wq.weight\n",
      "layers.15.attention.wk.weight\n",
      "layers.15.attention.wv.weight\n",
      "layers.15.attention.wo.weight\n",
      "layers.15.feed_forward.w1.weight\n",
      "layers.15.feed_forward.w2.weight\n",
      "layers.15.feed_forward.w3.weight\n",
      "layers.15.attention_norm.weight\n",
      "layers.15.ffn_norm.weight\n",
      "layers.16.attention.wq.weight\n",
      "layers.16.attention.wk.weight\n",
      "layers.16.attention.wv.weight\n",
      "layers.16.attention.wo.weight\n",
      "layers.16.feed_forward.w1.weight\n",
      "layers.16.feed_forward.w2.weight\n",
      "layers.16.feed_forward.w3.weight\n",
      "layers.16.attention_norm.weight\n",
      "layers.16.ffn_norm.weight\n",
      "layers.17.attention.wq.weight\n",
      "layers.17.attention.wk.weight\n",
      "layers.17.attention.wv.weight\n",
      "layers.17.attention.wo.weight\n",
      "layers.17.feed_forward.w1.weight\n",
      "layers.17.feed_forward.w2.weight\n",
      "layers.17.feed_forward.w3.weight\n",
      "layers.17.attention_norm.weight\n",
      "layers.17.ffn_norm.weight\n",
      "layers.18.attention.wq.weight\n",
      "layers.18.attention.wk.weight\n",
      "layers.18.attention.wv.weight\n",
      "layers.18.attention.wo.weight\n",
      "layers.18.feed_forward.w1.weight\n",
      "layers.18.feed_forward.w2.weight\n",
      "layers.18.feed_forward.w3.weight\n",
      "layers.18.attention_norm.weight\n",
      "layers.18.ffn_norm.weight\n",
      "layers.19.attention.wq.weight\n",
      "layers.19.attention.wk.weight\n",
      "layers.19.attention.wv.weight\n",
      "layers.19.attention.wo.weight\n",
      "layers.19.feed_forward.w1.weight\n",
      "layers.19.feed_forward.w2.weight\n",
      "layers.19.feed_forward.w3.weight\n",
      "layers.19.attention_norm.weight\n",
      "layers.19.ffn_norm.weight\n",
      "layers.20.attention.wq.weight\n",
      "layers.20.attention.wk.weight\n",
      "layers.20.attention.wv.weight\n",
      "layers.20.attention.wo.weight\n",
      "layers.20.feed_forward.w1.weight\n",
      "layers.20.feed_forward.w2.weight\n",
      "layers.20.feed_forward.w3.weight\n",
      "layers.20.attention_norm.weight\n",
      "layers.20.ffn_norm.weight\n",
      "layers.21.attention.wq.weight\n",
      "layers.21.attention.wk.weight\n",
      "layers.21.attention.wv.weight\n",
      "layers.21.attention.wo.weight\n",
      "layers.21.feed_forward.w1.weight\n",
      "layers.21.feed_forward.w2.weight\n",
      "layers.21.feed_forward.w3.weight\n",
      "layers.21.attention_norm.weight\n",
      "layers.21.ffn_norm.weight\n",
      "layers.22.attention.wq.weight\n",
      "layers.22.attention.wk.weight\n",
      "layers.22.attention.wv.weight\n",
      "layers.22.attention.wo.weight\n",
      "layers.22.feed_forward.w1.weight\n",
      "layers.22.feed_forward.w2.weight\n",
      "layers.22.feed_forward.w3.weight\n",
      "layers.22.attention_norm.weight\n",
      "layers.22.ffn_norm.weight\n",
      "layers.23.attention.wq.weight\n",
      "layers.23.attention.wk.weight\n",
      "layers.23.attention.wv.weight\n",
      "layers.23.attention.wo.weight\n",
      "layers.23.feed_forward.w1.weight\n",
      "layers.23.feed_forward.w2.weight\n",
      "layers.23.feed_forward.w3.weight\n",
      "layers.23.attention_norm.weight\n",
      "layers.23.ffn_norm.weight\n",
      "layers.24.attention.wq.weight\n",
      "layers.24.attention.wk.weight\n",
      "layers.24.attention.wv.weight\n",
      "layers.24.attention.wo.weight\n",
      "layers.24.feed_forward.w1.weight\n",
      "layers.24.feed_forward.w2.weight\n",
      "layers.24.feed_forward.w3.weight\n",
      "layers.24.attention_norm.weight\n",
      "layers.24.ffn_norm.weight\n",
      "layers.25.attention.wq.weight\n",
      "layers.25.attention.wk.weight\n",
      "layers.25.attention.wv.weight\n",
      "layers.25.attention.wo.weight\n",
      "layers.25.feed_forward.w1.weight\n",
      "layers.25.feed_forward.w2.weight\n",
      "layers.25.feed_forward.w3.weight\n",
      "layers.25.attention_norm.weight\n",
      "layers.25.ffn_norm.weight\n",
      "layers.26.attention.wq.weight\n",
      "layers.26.attention.wk.weight\n",
      "layers.26.attention.wv.weight\n",
      "layers.26.attention.wo.weight\n",
      "layers.26.feed_forward.w1.weight\n",
      "layers.26.feed_forward.w2.weight\n",
      "layers.26.feed_forward.w3.weight\n",
      "layers.26.attention_norm.weight\n",
      "layers.26.ffn_norm.weight\n",
      "layers.27.attention.wq.weight\n",
      "layers.27.attention.wk.weight\n",
      "layers.27.attention.wv.weight\n",
      "layers.27.attention.wo.weight\n",
      "layers.27.feed_forward.w1.weight\n",
      "layers.27.feed_forward.w2.weight\n",
      "layers.27.feed_forward.w3.weight\n",
      "layers.27.attention_norm.weight\n",
      "layers.27.ffn_norm.weight\n",
      "layers.28.attention.wq.weight\n",
      "layers.28.attention.wk.weight\n",
      "layers.28.attention.wv.weight\n",
      "layers.28.attention.wo.weight\n",
      "layers.28.feed_forward.w1.weight\n",
      "layers.28.feed_forward.w2.weight\n",
      "layers.28.feed_forward.w3.weight\n",
      "layers.28.attention_norm.weight\n",
      "layers.28.ffn_norm.weight\n",
      "layers.29.attention.wq.weight\n",
      "layers.29.attention.wk.weight\n",
      "layers.29.attention.wv.weight\n",
      "layers.29.attention.wo.weight\n",
      "layers.29.feed_forward.w1.weight\n",
      "layers.29.feed_forward.w2.weight\n",
      "layers.29.feed_forward.w3.weight\n",
      "layers.29.attention_norm.weight\n",
      "layers.29.ffn_norm.weight\n",
      "layers.30.attention.wq.weight\n",
      "layers.30.attention.wk.weight\n",
      "layers.30.attention.wv.weight\n",
      "layers.30.attention.wo.weight\n",
      "layers.30.feed_forward.w1.weight\n",
      "layers.30.feed_forward.w2.weight\n",
      "layers.30.feed_forward.w3.weight\n",
      "layers.30.attention_norm.weight\n",
      "layers.30.ffn_norm.weight\n",
      "layers.31.attention.wq.weight\n",
      "layers.31.attention.wk.weight\n",
      "layers.31.attention.wv.weight\n",
      "layers.31.attention.wo.weight\n",
      "layers.31.feed_forward.w1.weight\n",
      "layers.31.feed_forward.w2.weight\n",
      "layers.31.feed_forward.w3.weight\n",
      "layers.31.attention_norm.weight\n",
      "layers.31.ffn_norm.weight\n",
      "norm.weight\n",
      "output.weight\n"
     ]
    }
   ],
   "source": [
    "# values are unused here -- iterate over the keys only\n",
    "for k in state_d:\n",
    "    print(k)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "#words: 32000 - BOS ID: 1 - EOS ID: 2\n",
      "#words: 32000 - BOS ID: 1 - EOS ID: 2\n"
     ]
    }
   ],
   "source": [
    "test_sentence = \"write a python program to print hello world.\"\n",
    "\n",
    "from tokenizer import Tokenizer\n",
    "\n",
    "# the two boolean flags presumably add BOS/EOS -- confirm in tokenizer.Tokenizer\n",
    "sentence_encode = Tokenizer().encode(test_sentence, True, True)\n",
    "# torch.tensor with an explicit integer dtype avoids the float32 tensor that\n",
    "# torch.Tensor(...) would create, and the later .long() round-trip\n",
    "sentence_input = torch.tensor(sentence_encode, dtype=torch.long)\n",
    "\n",
    "# add a batch dimension: (seq_len,) -> (1, seq_len)\n",
    "sentence_input = sentence_input[None, :]\n",
    "\n",
    "idx_out = model.generate(sentence_input.to('cuda'), max_new_tokens=10)\n",
    "\n",
    "res = Tokenizer().decode(idx_out[0].tolist())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'write a python program to print hello world. op sullefree otra Step IntroductionUUILDmapduct'"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "b'`\\xad\\xa5=Xa\\xfe=`\\x03\\xff<\\xab\\x8c\\x0c?\\x00\"\\t<\\xa80\\xfc>P\\xaa^=5\\xa9\\x12?\\xa0\\x82\\xdc>g)8?\\x80\\x9c\\xf4<\\xd8\\x80e?8\\xdc\\xf6>\\xd2\\xb5|?\\x00\\x08\\x84=:\\x82\\xf9>\\xa9f9?\\xfa\\'\\x96>\\xf8\\\\\\x10?\\xaa\\\\\\xba>\\xc0\\xf0X<\\xab}X?Yq\\x15?\\xfa\\x8eI?W\\xb4m?\\x97\\x045?\\xf4\\xfa\\x11>\\xe6\\x81\\t?\\xb4\\x10 >R\\xdb\\n?w0}?d\\x0b\\xb6>Y\\x0fD?V\\xa4\\xca>f\\x08M?P\\tm=\\x18\\xb9\\xfd=-\\x02I?\\xca\\xbcA?\\xb7(\\x10?,\\x87{>\\xdc\\xa7\\xb7>7\\x8bh?\\xc0\\x0bH?\\x85\\x8ct?\\x87\\xd8a?\\x12\\xe0\\xe8>\\xf2\\xa0\\xde>\\x16q\\x00?@\\xf9\\xec>\\\\\\xd6\\xc2>\\xac<\\xb4>\\xc8\\xaa\\x8b>@)d??yh?\\x00\\xf5\\x83<\\xe9\\x0c\\x0b?\\xf2:\\x0c?@!a>Hpp?'"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "import struct\n",
    "tens = torch.rand((3, 4, 5))\n",
    "# flatten to 1-D on CPU, then pack as native-endian float32 bytes\n",
    "flat = tens.detach().cpu().reshape(-1)\n",
    "b = struct.pack(f\"{flat.numel()}f\", *flat.tolist())\n",
    "b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "60"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tens.numel()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "def quantize_symmetry_int8(w : torch.Tensor, group_size : int):\n",
    "    '''\n",
    "    Symmetric per-group quantization into int8, range [-127, 127].\n",
    "\n",
    "    Returns (int8 values shaped (-1, group_size), per-group scale,\n",
    "    max absolute reconstruction error over all groups).\n",
    "    https://mp.weixin.qq.com/s/EGPQNemfoyE1QnyJyjgsfg\n",
    "    '''\n",
    "    assert w.numel() % group_size == 0\n",
    "    w = w.float().reshape(-1, group_size)\n",
    "    # the largest magnitude in each group determines that group's scale\n",
    "    wmax = torch.abs(w).max(dim=1).values\n",
    "    scale = wmax / 127.0\n",
    "    # guard all-zero groups: a zero scale would give 0/0 = NaN below\n",
    "    scale = torch.where(scale == 0, torch.ones_like(scale), scale)\n",
    "    # scale into [-127, 127] and round to the nearest integer\n",
    "    int8val = torch.round(w / scale[:, None]).to(torch.int8)\n",
    "    # dequantize by rescaling, then measure the per-group max error\n",
    "    fp32val = int8val.float() * scale[:, None]\n",
    "    err = torch.abs(fp32val - w).max(dim=1).values\n",
    "    max_err = err.max().item()\n",
    "    return int8val, scale, max_err\n",
    "\n",
    "def quantize_asymm_int8(w : torch.Tensor, group_size : int):\n",
    "    '''\n",
    "    Asymmetric (affine) quantization into int8 with one global scale S\n",
    "    and zero point Z.\n",
    "\n",
    "    Returns (int8 values shaped (-1, group_size), scale S, zero point Z,\n",
    "    max absolute reconstruction error over all groups).\n",
    "    '''\n",
    "    quantized_data_type = torch.int8\n",
    "\n",
    "    assert w.numel() % group_size == 0\n",
    "    temp_w = w.float().detach().cpu().view(-1)\n",
    "\n",
    "    Wmax = temp_w.max().item()\n",
    "    Wmin = temp_w.min().item()\n",
    "\n",
    "    # Get the Qmax and Qmin from quantized data type info\n",
    "    Qmax = torch.iinfo(quantized_data_type).max\n",
    "    Qmin = torch.iinfo(quantized_data_type).min\n",
    "\n",
    "    S = (Wmax - Wmin) / (Qmax - Qmin)\n",
    "    # guard a constant tensor (Wmax == Wmin): avoid division by zero below\n",
    "    if S == 0:\n",
    "        S = 1.0\n",
    "\n",
    "    # Zero point: the quantized integer that represents the real value 0\n",
    "    Z = Qmin - Wmin / S\n",
    "    if Z < Qmin:\n",
    "        Z = Qmin\n",
    "    elif Z > Qmax:\n",
    "        Z = Qmax\n",
    "    else:\n",
    "        Z = int(round(Z))\n",
    "\n",
    "    quantized_temp_w = torch.clamp(torch.round(temp_w / S + Z), Qmin, Qmax)\n",
    "    quantized_temp_w = quantized_temp_w.to(quantized_data_type).view(-1, group_size)\n",
    "\n",
    "    temp_w = temp_w.view(-1, group_size)\n",
    "    # BUG FIX: cast to float BEFORE subtracting Z -- (int8 tensor - Python int)\n",
    "    # stays int8 and wraps around (e.g. 127 - (-128)), which inflated max_err\n",
    "    # to roughly the full data range instead of ~S/2.\n",
    "    dequantized_temp_w = (quantized_temp_w.float() - Z) * S\n",
    "\n",
    "    err = torch.abs(dequantized_temp_w - temp_w).max(dim=1).values\n",
    "    max_err = err.max().item()\n",
    "\n",
    "    return quantized_temp_w, S, Z, max_err"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[-0.3183,  1.5846,  1.5001,  0.2914],\n",
      "        [-1.7153,  0.8439, -0.6805,  0.6445],\n",
      "        [ 0.5969,  0.2284, -0.7909,  0.0560],\n",
      "        [ 0.7423, -0.4352,  0.9630,  0.1770]])\n"
     ]
    }
   ],
   "source": [
    "original_weight = torch.randn((4,4))\n",
    "print(original_weight)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "aaa, _, _, a_err = quantize_asymm_int8(original_weight, 2)\n",
    "bbb, _, b_err = quantize_symmetry_int8(original_weight, 2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([8, 2])"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "aaa.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([8, 2])"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bbb.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "3.306993246078491"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a_err"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.006501197814941406"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "b_err"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "hamster",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "undefined.undefined.undefined"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
