{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<torch._C.Generator at 0x22392c5d4b0>"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import DataLoader\n",
    "from datasets import load_dataset\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "torch.manual_seed(12046)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class RNN(nn.Module):\n",
    "\n",
    "    def __init__(self,input_size,hidden_size):\n",
    "\n",
    "        super().__init__()\n",
    "        self.input_size = input_size\n",
    "        self.hidden_size = hidden_size\n",
    "        self.i2h = nn.Linear(input_size + hidden_size,hidden_size)\n",
    "\n",
    "    def forward(self,input,hidden = None):\n",
    "        # intput (B, T ,C)\n",
    "        # hidden (B,    H)\n",
    "        # out    (B, T , H)\n",
    "        B,T,C = input.shape\n",
    "        re = []\n",
    "        if hidden is None:\n",
    "            hidden = self.init_hidden(B,input.device)\n",
    "        for i in range(T) :\n",
    "            combinded = torch.concat((input[:,i,:],hidden),dim = -1) #(B, C + H)\n",
    "            hidden = F.relu(self.i2h(combinded))\n",
    "            re.append(hidden)\n",
    "        # re 是 list[tensor]\n",
    "        return torch.stack(re,dim = 1)  #输出形状是(B,T,H)\n",
    "    def init_hidden(self,B,device):\n",
    "        return torch.zeros((B,self.hidden_size),device=device)\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[tensor([[0.5956, 0.7557, 0.7754, 0.2783],\n",
      "        [0.0650, 0.7709, 0.8761, 0.2821],\n",
      "        [0.1134, 0.6077, 0.1158, 0.1367]]), tensor([[0.4014, 0.9480, 0.0035, 0.3945],\n",
      "        [0.3714, 0.1595, 0.9217, 0.9083],\n",
      "        [0.2280, 0.2509, 0.6837, 0.0671]])]\n",
      "torch.Size([3, 4, 2])\n"
     ]
    }
   ],
   "source": [
     "a = torch.rand(3,4)\n",
     "b = torch.rand(3,4)\n",
     "c = [a,b]\n",
     "print(c)\n",
     "# Demo of torch.stack: stacking two (3, 4) tensors along a new dim=2 yields shape (3, 4, 2).\n",
     "print(torch.stack(c,dim = 2).shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([5, 10, 4])"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "r = RNN(3,4)            # input feature size 3; hidden size 4 (normally the output size would be the vocabulary size)\n",
     "x = torch.randn(5,10,3) # batch of 5 sequences, each of length 10, with 3 features per step\n",
     "r(x).shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
     "x = torch.randn(5,10,3)  # NOTE(review): re-creates x with the same shape as the previous cell; appears redundant"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "class CharRNNBatch(nn.Module):\n",
    "    \n",
    "    def __init__(self,vs):\n",
    "        super().__init__()\n",
    "        emb_size = 256\n",
    "        hidden_size = 128\n",
    "        self.emb = nn.Embedding(vs,emb_size)\n",
    "        self.rnn1 = RNN(emb_size,hidden_size)\n",
    "        self.ln1 = nn.LayerNorm(hidden_size)\n",
    "        self.rnn2 = RNN(hidden_size,hidden_size)\n",
    "        self.ln2 = nn.LayerNorm(hidden_size,vs)\n",
    "        self.lm = nn.Linear(hidden_size,vs) #语言建模头  \n",
    "        self.dp = nn.Dropout(0.4)\n",
    "    def forward(self,x):\n",
    "        # x (B, T)\n",
    "        # 暂不实现初始隐藏状态的输入\n",
    "        B = x.shape[0]\n",
    "        embeddings = self.emb(x) \n",
    "        h = F.relu(self.ln1(self.rnn1(embeddings)))\n",
    "        h = self.dp(h)\n",
    "        h = F.relu(self.ln2(self.rnn2(h)))  \n",
    "        h = self.dp(h)\n",
    "        out = self.lm(h)            # (B,T,vs) \n",
    "        \n",
    "        \n",
    "        \n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "CharRNNBatch(\n",
       "  (emb): Embedding(98, 256)\n",
       "  (rnn1): RNN(\n",
       "    (i2h): Linear(in_features=384, out_features=128, bias=True)\n",
       "  )\n",
       "  (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "  (rnn2): RNN(\n",
       "    (i2h): Linear(in_features=256, out_features=128, bias=True)\n",
       "  )\n",
       "  (ln2): LayerNorm((128,), eps=98, elementwise_affine=True)\n",
       "  (lm): Linear(in_features=128, out_features=98, bias=True)\n",
       "  (dp): Dropout(p=0.4, inplace=False)\n",
       ")"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "c_model = CharRNNBatch(98)  # vocabulary size 98\n",
     "c_model  # display the module structure\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "@torch.no_grad()\n",
    "def generate(model,context,tokenizer,max_tokens = 300):\n",
    "    # context (1)\n",
    "    out = context.tolist()[0]\n",
    "    model.eval() #打开推理模式\n",
    "    for _ in range(max_tokens):\n",
    "        logits = model(context)             # (1,T,98)\n",
    "        probs = F.softmax(logits[:,-1,:],dim = -1)  # (1,98)\n",
    "        # 随机抽样\n",
    "        ix = torch.multinomial(probs,num_samples = 1)   #(1,1)\n",
    "        # 更新背景\n",
    "        context = torch.concat((context,ix),dim = -1)\n",
    "        out.append(ix.item())\n",
    "        if out[-1] == tokenizer.end_ind:\n",
    "            break\n",
    "    model.train()\n",
    "    return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using the latest cached version of the module from C:\\Users\\35139\\.cache\\huggingface\\modules\\datasets_modules\\datasets\\code_search_net\\8f2524e6b62f65af5f5d65c53715c654db7b08dc93e0b7bcce2ab2f286a75be1 (last modified on Thu Feb 20 11:36:52 2025) since it couldn't be found locally at code_search_net, or remotely on the Hugging Face Hub.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'def f(x):'"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Load the CodeSearchNet python split and keep only functions from the apache/spark repository.\n",
     "raw_datasets = load_dataset('code_search_net', 'python')\n",
     "datasets = raw_datasets['train'].filter(lambda x: 'apache/spark' in x['repository_name'])\n",
     "# Tokenizer implementation\n",
    "class CharTokenizer:\n",
    "    def __init__(self,data:list[str],end_ind = 0):\n",
    "        # data : list[str]   \n",
    "        chars = sorted(list(set(''.join(data))))\n",
    "        self.char2ind = {s: i + 1 for i,s in enumerate(chars)}\n",
    "        self.char2ind['<|e>'] = end_ind\n",
    "\n",
    "        # 转换索引\n",
    "        self.ind2char = {v : k for k,v in self.char2ind.items()}\n",
    "        self.end_ind = end_ind\n",
    "\n",
    "        pass\n",
    "\n",
    "    def encode(self,x : str):\n",
    "        return [self.char2ind[s] for s in x]\n",
    "\n",
    "    def decode(self,x : list[int]| int):\n",
    "        if isinstance(x,int):\n",
    "            return self.ind2char(x)\n",
    "        return [self.ind2char[i] for i in x]\n",
     "tokenizer =CharTokenizer(datasets['whole_func_string'])\n",
     "\n",
     "# Round-trip check: encoding then decoding should reproduce the input string.\n",
     "test_str ='def f(x):'\n",
     "re = tokenizer.encode(test_str)  # NOTE(review): 're' shadows the stdlib module name\n",
     "''.join(tokenizer.decode(re))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "ename": "TypeError",
     "evalue": "'NoneType' object is not subscriptable",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[20], line 2\u001b[0m\n\u001b[0;32m      1\u001b[0m context \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mtensor(tokenizer\u001b[38;5;241m.\u001b[39mencode(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdef\u001b[39m\u001b[38;5;124m\"\u001b[39m))\u001b[38;5;241m.\u001b[39munsqueeze(\u001b[38;5;241m0\u001b[39m)\n\u001b[1;32m----> 2\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m_\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(tokenizer\u001b[38;5;241m.\u001b[39mdecode(generate(c_model,context,tokenizer)))\n",
      "File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python311\\site-packages\\torch\\utils\\_contextlib.py:116\u001b[0m, in \u001b[0;36mcontext_decorator.<locals>.decorate_context\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m    113\u001b[0m \u001b[38;5;129m@functools\u001b[39m\u001b[38;5;241m.\u001b[39mwraps(func)\n\u001b[0;32m    114\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mdecorate_context\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m    115\u001b[0m     \u001b[38;5;28;01mwith\u001b[39;00m ctx_factory():\n\u001b[1;32m--> 116\u001b[0m         \u001b[38;5;28;01mreturn\u001b[39;00m func(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
      "Cell \u001b[1;32mIn[18], line 8\u001b[0m, in \u001b[0;36mgenerate\u001b[1;34m(model, context, tokenizer, max_tokens)\u001b[0m\n\u001b[0;32m      6\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m _ \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(max_tokens):\n\u001b[0;32m      7\u001b[0m     logits \u001b[38;5;241m=\u001b[39m model(context)             \u001b[38;5;66;03m# (1,T,98)\u001b[39;00m\n\u001b[1;32m----> 8\u001b[0m     probs \u001b[38;5;241m=\u001b[39m F\u001b[38;5;241m.\u001b[39msoftmax(logits[:,\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m,:],dim \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m)  \u001b[38;5;66;03m# (1,98)\u001b[39;00m\n\u001b[0;32m      9\u001b[0m     \u001b[38;5;66;03m# 随机抽样\u001b[39;00m\n\u001b[0;32m     10\u001b[0m     ix \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mmultinomial(probs,num_samples \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m)   \u001b[38;5;66;03m#(1,1)\u001b[39;00m\n",
      "\u001b[1;31mTypeError\u001b[0m: 'NoneType' object is not subscriptable"
     ]
    }
   ],
   "source": [
     "# Sample from the (untrained) model starting from the prompt \"def\".\n",
     "context = torch.tensor(tokenizer.encode(\"def\")).unsqueeze(0)\n",
     "# NOTE(review): the saved traceback shows model(context) returned None —\n",
     "# CharRNNBatch.forward is missing a return statement — so logits[:, -1, :]\n",
     "# raised \"'NoneType' object is not subscriptable\" here.\n",
     "'_'.join(tokenizer.decode(generate(c_model,context,tokenizer)))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
