{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import time\n",
    "import pandas as pd\n",
    "from dataclasses import dataclass\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.nn import functional as F\n",
    "from torch.utils.data import Dataset\n",
    "from torch.utils.data.dataloader import DataLoader\n",
    "from torch.utils.tensorboard import SummaryWriter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the waimai (food-delivery) review dataset and inspect a few rows.\n",
    "data = pd.read_csv('./dataset/waimai_10k.csv')\n",
    "# Drop rows with a missing review text; avoid inplace=True so the cell stays\n",
    "# idempotent on re-run and the operation can be chained.\n",
    "data = data.dropna(subset=['review'])\n",
    "# Vectorized .str.len() instead of apply(lambda) for the review length.\n",
    "data['review_length'] = data.review.str.len()\n",
    "data.sample(5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Keep only reviews of at most 50 characters (the original Chinese comment said\n",
    "# \"filter reviews shorter than 50\", but the code keeps length <= 50).\n",
    "data = data[data.review_length <= 50]\n",
    "# NOTE(review): makemore-style naming — each \"word\" is really a whole review string.\n",
    "words = data.review.tolist()\n",
    "chars = sorted(list(set(''.join(words))))     # character vocabulary over all reviews\n",
    "max_word_length = max(len(w) for w in words)  # longest review, used for tensor sizing\n",
    "\n",
    "print(f\"number of examples: {len(words)}\")\n",
    "print(f\"max word length: {max_word_length}\")\n",
    "print(f\"size of vocabulary: {len(chars)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shuffle once, then carve the tail of the permutation off as the test split.\n",
    "test_set_size = min(1000, int(len(words) * 0.1))  # cap the held-out set at 1000 examples\n",
    "rp = torch.randperm(len(words)).tolist()          # random permutation of all indices\n",
    "train_idx, test_idx = rp[:-test_set_size], rp[-test_set_size:]\n",
    "train_words = [words[i] for i in train_idx]\n",
    "test_words = [words[i] for i in test_idx]\n",
    "print(f\"split up the dataset into {len(train_words)} training examples and {len(test_words)} test examples\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [],
   "source": [
    "class CharDataset(Dataset):\n",
    "    \"\"\"Character-level dataset mapping each review string to (x, y) index tensors.\n",
    "\n",
    "    Index 0 is reserved as a special start/padding token; real characters are\n",
    "    mapped to 1..len(chars) (see char2i below).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, words, chars, max_word_length, block_size=1):\n",
    "        self.words = words                      # list of review strings\n",
    "        self.chars = chars                      # sorted character vocabulary\n",
    "        self.max_word_length = max_word_length  # longest review; fixes tensor sizes\n",
    "        self.block_size = block_size            # context-window width fed to the model\n",
    "        # char -> index (1-based; 0 is reserved for the special token) and back\n",
    "        self.char2i = {ch:i+1 for i,ch in enumerate(chars)}\n",
    "        self.i2char = {i:s for s,i in self.char2i.items()}\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.words)\n",
    "\n",
    "    def contains(self, word):\n",
    "        # membership test over the raw review strings (linear scan of a list)\n",
    "        return word in self.words\n",
    "\n",
    "    def get_vocab_size(self):\n",
    "        return len(self.chars) + 1      # +1 for the special token at index 0\n",
    "\n",
    "    def get_output_length(self):\n",
    "        # NOTE(review): this reports max_word_length + 1, but __getitem__ builds y\n",
    "        # of length max_word_length — confirm which size downstream code expects.\n",
    "        return self.max_word_length + 1\n",
    "\n",
    "    def encode(self, word):\n",
    "        # char sequence ---> index sequence\n",
    "        ix = torch.tensor([self.char2i[w] for w in word], dtype=torch.long)\n",
    "        return ix\n",
    "\n",
    "    def decode(self, ix):\n",
    "        # index sequence ---> char sequence\n",
    "        word = ''.join(self.i2char[i] for i in ix)\n",
    "        return word\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        word = self.words[idx]\n",
    "        ix = self.encode(word)\n",
    "        # x: block_size leading zeros (special token) followed by the encoded chars;\n",
    "        # y: the same chars, i.e. x shifted left by block_size positions.\n",
    "        x = torch.zeros(self.max_word_length + self.block_size, dtype=torch.long)\n",
    "        y = torch.zeros(self.max_word_length, dtype=torch.long)\n",
    "        x[self.block_size:len(ix)+self.block_size] = ix\n",
    "        y[:len(ix)] = ix\n",
    "        # y[len(ix)] stays 0 and serves as the stop target; everything after it is\n",
    "        # masked from the loss via -1 (matches ignore_index=-1 in the model).\n",
    "        y[len(ix)+1:] = -1 # index -1 will mask the loss at the inactive locations\n",
    "        if self.block_size > 1:\n",
    "            # slide a block_size-wide window over x: one context row per position in y\n",
    "            xs = []\n",
    "            for i in range(x.shape[0]-self.block_size):\n",
    "                xs.append(x[i:i+self.block_size].unsqueeze(0))\n",
    "            return torch.cat(xs), y\n",
    "        else:\n",
    "            return x, y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [],
   "source": [
    "class InfiniteDataLoader:\n",
    "\n",
    "    def __init__(self, dataset, **kwargs):\n",
    "        train_sampler = torch.utils.data.RandomSampler(dataset, replacement=True, num_samples=int(1e10))\n",
    "        self.train_loader = DataLoader(dataset, sampler=train_sampler, **kwargs)\n",
    "        self.data_iter = iter(self.train_loader)\n",
    "\n",
    "    def next(self):\n",
    "        try:\n",
    "            batch = next(self.data_iter)\n",
    "        except StopIteration: # this will technically only happen after 1e10 samples... (i.e. basically never)\n",
    "            self.data_iter = iter(self.train_loader)\n",
    "            batch = next(self.data_iter)\n",
    "        return batch\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [],
   "source": [
    "@dataclass\n",
    "class ModelConfig:\n",
    "    \"\"\"Hyperparameters shared by the dataset and the model.\"\"\"\n",
    "    block_size: int = None # length of the input sequences (model context window)\n",
    "    vocab_size: int = None # size of vocabulary (presumably dataset.get_vocab_size(); confirm)\n",
    "    n_embed : int = None # character-embedding dimension\n",
    "    n_hidden: int = None # width of the MLP hidden layer\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MLP(nn.Module):\n",
    "    \"\"\"\n",
    "    takes the previous block_size tokens, encodes them with a lookup table,\n",
    "    concatenates the vectors and predicts the next token with an MLP.\n",
    "\n",
    "    Reference:\n",
    "    Bengio et al. 2003 https://www.jmlr.org/papers/volume3/bengio03a/bengio03a.pdf\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, config):\n",
    "        super().__init__()\n",
    "        self.block_size = config.block_size\n",
    "        self.vocab_size = config.vocab_size\n",
    "        self.wte = nn.Embedding(config.vocab_size + 1, config.n_embed) # token embeddings table\n",
    "        # +1 for a special token\n",
    "        self.mlp = nn.Sequential(\n",
    "            nn.Linear(self.block_size * config.n_embed, config.n_hidden),\n",
    "            nn.Tanh(),\n",
    "            nn.Linear(config.n_hidden, self.vocab_size)\n",
    "        )\n",
    "\n",
    "    def get_block_size(self):\n",
    "        return self.block_size\n",
    "\n",
    "    def forward(self, idx, targets=None):\n",
    "\n",
    "        # gather the word embeddings of the previous 3 words\n",
    "        embs = []\n",
    "        for k in range(self.block_size):\n",
    "            tok_emb = self.wte(idx[:,:,k]) # token embeddings of shape (b, t, n_embd)\n",
    "            #idx = torch.roll(idx, 1, 1)\n",
    "            #idx[:, 0] = self.vocab_size # special <BLANK> token\n",
    "            embs.append(tok_emb)\n",
    "\n",
    "        # concat all of the embeddings together and pass through an MLP\n",
    "        x = torch.cat(embs, -1) # (b, t, n_embd * block_size)\n",
    "        logits = self.mlp(x)\n",
    "\n",
    "        # if we are given some desired targets also calculate the loss\n",
    "        loss = None\n",
    "        if targets is not None:\n",
    "            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)\n",
    "\n",
    "        return logits, loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [],
   "source": [
    "@torch.inference_mode()\n",
    "def evaluate(model, dataset, batch_size=10, max_batches=None):\n",
    "    model.eval()\n",
    "    loader = DataLoader(dataset, shuffle=True, batch_size=batch_size, num_workers=0)\n",
    "    losses = []\n",
    "    for i, batch in enumerate(loader):\n",
    "        batch = [t.to('cuda') for t in batch]\n",
    "        X, Y = batch\n",
    "        logits, loss = model(X, Y)\n",
    "        losses.append(loss.item())\n",
    "        if max_batches is not None and i >= max_batches:\n",
    "            break\n",
    "    mean_loss = torch.tensor(losses).mean().item()\n",
    "    model.train() # reset model back to training mode\n",
    "    return mean_loss\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reproducibility and logging setup.\n",
    "SEED = 12345\n",
    "torch.manual_seed(SEED)\n",
    "torch.cuda.manual_seed_all(SEED)\n",
    "\n",
    "work_dir = \"./Mlp_log\"\n",
    "os.makedirs(work_dir, exist_ok=True)     # create the log directory if missing\n",
    "writer = SummaryWriter(log_dir=work_dir) # TensorBoard writer for training curves"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): `config` is never defined in this notebook (the previous cell is\n",
    "# empty) — presumably a ModelConfig(...) instantiation was deleted; this cell\n",
    "# raises NameError on a fresh Restart & Run All until it is restored.\n",
    "model = MLP(config)\n",
    "\n",
    "model.to('cuda')\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
