{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "device =  cuda\n"
     ]
    }
   ],
   "source": [
    "import train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "datas = train.dataset.get_data_set()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "fanzxl = datas[\"xiaohuangji\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "list"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "type(fanzxl)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_all_token(datas):\n",
    "    token2idx = {}\n",
    "    token2idx.update({\"EOF\":0})\n",
    "    idx = 1\n",
    "    for sentence in datas:\n",
    "        stc = sentence['sentence']\n",
    "        for word in stc:\n",
    "            if word not in token2idx.keys():\n",
    "                token2idx.update({word: idx})\n",
    "                idx += 1\n",
    "    return token2idx"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "token2idx = get_all_token(fanzxl)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "7876"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(fanzxl)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "\n",
    "def sentence2vec(sentence, token2idx):\n",
    "    tokens = torch.LongTensor()\n",
    "    for token in sentence:\n",
    "        tokens = torch.cat([tokens, torch.LongTensor([token2idx[token]])])\n",
    "    return tokens\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([1, 1])"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sentence2vec(fanzxl[0]['sentence'], token2idx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "from copy import deepcopy\n",
    "\n",
    "\n",
    "def sentence_filter(datas, long_limit):\n",
    "    datas_f = []\n",
    "    for sentence in datas:\n",
    "        stc = sentence['sentence']\n",
    "        if len(stc) < long_limit:\n",
    "            datas_f += [sentence]\n",
    "    return datas_f"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "fanzxl_f = sentence_filter(fanzxl, 30)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "29 832\n"
     ]
    }
   ],
   "source": [
    "temp = 0\n",
    "idx = 0\n",
    "\n",
    "for i, sentence in enumerate(fanzxl_f):\n",
    "    stc = sentence['sentence']\n",
    "    if len(stc) > temp:\n",
    "        temp = len(stc)\n",
    "        idx = i\n",
    "print(temp, idx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import Dataset\n",
    "\n",
    "class Seq2seqDataSet(Dataset):\n",
    "    def __init__(self, datas, token2idx, long_limit):\n",
    "        super(Seq2seqDataSet, self).__init__()\n",
    "        self.datas = sentence_filter(datas, long_limit)\n",
    "        self.token2idx = token2idx\n",
    "        self.long_limit = long_limit\n",
    "\n",
    "    def __len__(self):\n",
    "        return (len(self.datas)-1)\n",
    "\n",
    "    def __getitem__(self, item):\n",
    "        q = sentence2vec(self.datas[item]['sentence'], token2idx)\n",
    "        a = sentence2vec(self.datas[item + 1]['sentence'], token2idx)\n",
    "\n",
    "        if len(q) < self.long_limit:\n",
    "            q = torch.cat([q, torch.LongTensor([0]*(self.long_limit - len(q)))])\n",
    "        \n",
    "        if len(a) < self.long_limit:\n",
    "            a = torch.cat([a, torch.LongTensor([0]*(self.long_limit - len(a)))])\n",
    "        \n",
    "        return q, a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [],
   "source": [
     "# NOTE(review): `dataseet` is a typo for `dataset`; kept as-is because the\n",
     "# DataLoader cell below references this exact name.\n",
     "dataseet = Seq2seqDataSet(fanzxl, token2idx, long_limit=30)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [],
   "source": [
     "from torch.utils.data import DataLoader\n",
     "# Default collate stacks the fixed-length (q, a) tensors from Seq2seqDataSet.\n",
     "dataloader = DataLoader(dataseet, batch_size=8, shuffle=True,\n",
     "                            num_workers=0,\n",
     "                            drop_last=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 如何组成训练数据\n",
    "def collate_fn(batch):\n",
    "    src = []\n",
    "    target = []\n",
    "    for i in range(len(datas) - 1):\n",
    "        single = datas[i]\n",
    "        next_data = datas[i + 1]\n",
    "        sentence_next = next_data['sentence']\n",
    "        next_list = dataset.transfer_word_2_number(sentence_next)\n",
    "        sentence = single['sentence']\n",
    "        sen_list = dataset.transfer_word_2_number(sentence)\n",
    "        src.append(sen_list)\n",
    "        target.append(next_list)\n",
    "    src.append(dataset.pad_zero_with_list([]))\n",
    "    target.append(dataset.pad_zero_with_list([]))\n",
    "    src = src[:config.DATA_LENGTH]\n",
    "    target = target[:config.DATA_LENGTH]\n",
    "    if len(src) > config.DATA_LENGTH or len(target) > config.DATA_LENGTH:\n",
    "        raise (\"DataLength错误 src->,tar->\", len(src), len(target))\n",
    "    src_tensor = torch.tensor(src, device=device)\n",
    "    target_tensor = torch.tensor(target, device=device)\n",
    "\n",
    "    return src_tensor, target_tensor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input_dim = 20922,emb_dim = 256\n"
     ]
    }
   ],
   "source": [
     "# NOTE(review): this rebinds `dataloader`, replacing the one built above\n",
     "# with whatever train.train() returns -- confirm which loader the later\n",
     "# cells are meant to use.\n",
     "dataloader = train.train(fanzxl)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "ename": "ValueError",
     "evalue": "Caught ValueError in DataLoader worker process 0.\nOriginal Traceback (most recent call last):\n  File \"c:\\Users\\Administrator\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\utils\\data\\_utils\\worker.py\", line 202, in _worker_loop\n    data = fetcher.fetch(index)\n  File \"c:\\Users\\Administrator\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\utils\\data\\_utils\\fetch.py\", line 47, in fetch\n    return self.collate_fn(data)\n  File \"c:\\Users\\Administrator\\emma\\train.py\", line 98, in collate_fn\n    src_tensor = torch.tensor(src, device=device)\nValueError: expected sequence of length 64 at dim 1 (got 86)\n",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[1;32mc:\\Users\\Administrator\\emma\\test.ipynb Cell 7\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> <a href='vscode-notebook-cell:/c%3A/Users/Administrator/emma/test.ipynb#ch0000008?line=0'>1</a>\u001b[0m \u001b[39mfor\u001b[39;00m src, target \u001b[39min\u001b[39;00m dataloader:\n\u001b[0;32m      <a href='vscode-notebook-cell:/c%3A/Users/Administrator/emma/test.ipynb#ch0000008?line=1'>2</a>\u001b[0m     \u001b[39mprint\u001b[39m(src\u001b[39m.\u001b[39msize())\n\u001b[0;32m      <a href='vscode-notebook-cell:/c%3A/Users/Administrator/emma/test.ipynb#ch0000008?line=2'>3</a>\u001b[0m     \u001b[39mprint\u001b[39m(target\u001b[39m.\u001b[39msize())\n",
      "File \u001b[1;32mc:\\Users\\Administrator\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\utils\\data\\dataloader.py:517\u001b[0m, in \u001b[0;36m_BaseDataLoaderIter.__next__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    515\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_sampler_iter \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m    516\u001b[0m     \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_reset()\n\u001b[1;32m--> 517\u001b[0m data \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_next_data()\n\u001b[0;32m    518\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_num_yielded \u001b[39m+\u001b[39m\u001b[39m=\u001b[39m \u001b[39m1\u001b[39m\n\u001b[0;32m    519\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_dataset_kind \u001b[39m==\u001b[39m _DatasetKind\u001b[39m.\u001b[39mIterable \u001b[39mand\u001b[39;00m \\\n\u001b[0;32m    520\u001b[0m         \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_IterableDataset_len_called \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m \u001b[39mand\u001b[39;00m \\\n\u001b[0;32m    521\u001b[0m         \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_num_yielded \u001b[39m>\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_IterableDataset_len_called:\n",
      "File \u001b[1;32mc:\\Users\\Administrator\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\utils\\data\\dataloader.py:1199\u001b[0m, in \u001b[0;36m_MultiProcessingDataLoaderIter._next_data\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m   1197\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m   1198\u001b[0m     \u001b[39mdel\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_task_info[idx]\n\u001b[1;32m-> 1199\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_process_data(data)\n",
      "File \u001b[1;32mc:\\Users\\Administrator\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\utils\\data\\dataloader.py:1225\u001b[0m, in \u001b[0;36m_MultiProcessingDataLoaderIter._process_data\u001b[1;34m(self, data)\u001b[0m\n\u001b[0;32m   1223\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_try_put_index()\n\u001b[0;32m   1224\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39misinstance\u001b[39m(data, ExceptionWrapper):\n\u001b[1;32m-> 1225\u001b[0m     data\u001b[39m.\u001b[39;49mreraise()\n\u001b[0;32m   1226\u001b[0m \u001b[39mreturn\u001b[39;00m data\n",
      "File \u001b[1;32mc:\\Users\\Administrator\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\_utils.py:429\u001b[0m, in \u001b[0;36mExceptionWrapper.reraise\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    425\u001b[0m \u001b[39melif\u001b[39;00m \u001b[39mgetattr\u001b[39m(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mexc_type, \u001b[39m\"\u001b[39m\u001b[39mmessage\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39mNone\u001b[39;00m):\n\u001b[0;32m    426\u001b[0m     \u001b[39m# Some exceptions have first argument as non-str but explicitly\u001b[39;00m\n\u001b[0;32m    427\u001b[0m     \u001b[39m# have message field\u001b[39;00m\n\u001b[0;32m    428\u001b[0m     \u001b[39mraise\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mexc_type(message\u001b[39m=\u001b[39mmsg)\n\u001b[1;32m--> 429\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mexc_type(msg)\n",
      "\u001b[1;31mValueError\u001b[0m: Caught ValueError in DataLoader worker process 0.\nOriginal Traceback (most recent call last):\n  File \"c:\\Users\\Administrator\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\utils\\data\\_utils\\worker.py\", line 202, in _worker_loop\n    data = fetcher.fetch(index)\n  File \"c:\\Users\\Administrator\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\utils\\data\\_utils\\fetch.py\", line 47, in fetch\n    return self.collate_fn(data)\n  File \"c:\\Users\\Administrator\\emma\\train.py\", line 98, in collate_fn\n    src_tensor = torch.tensor(src, device=device)\nValueError: expected sequence of length 64 at dim 1 (got 86)\n"
     ]
    }
   ],
   "source": [
     "# NOTE(review): this cell failed with a ValueError raised inside\n",
     "# train.py's collate_fn (ragged sequence lengths) -- see the recorded\n",
     "# traceback in the cell output.\n",
     "for src, target in dataloader:\n",
     "    print(src.size())\n",
     "    print(target.size())\n",
     "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>sentence</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>呵呵</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>是王若猫的。</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>不是</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>那是什么？</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>怎么了</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7871</th>\n",
       "      <td>怎么啦不要不开心哦</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7872</th>\n",
       "      <td>情绪跌倒低谷</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7873</th>\n",
       "      <td>孩子气的大傻瓜</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7874</th>\n",
       "      <td>你大爷</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7875</th>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>7876 rows × 1 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "       sentence\n",
       "0            呵呵\n",
       "1        是王若猫的。\n",
       "2            不是\n",
       "3         那是什么？\n",
       "4           怎么了\n",
       "...         ...\n",
       "7871  怎么啦不要不开心哦\n",
       "7872     情绪跌倒低谷\n",
       "7873    孩子气的大傻瓜\n",
       "7874        你大爷\n",
       "7875           \n",
       "\n",
       "[7876 rows x 1 columns]"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pd.DataFrame(fanzxl)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "import config\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "attn = train.Attention(config.ENC_HID_DIM, config.DEC_HID_DIM)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input_dim = 2550,emb_dim = 256\n"
     ]
    }
   ],
   "source": [
    "enc = train.Encoder(len(token2idx), config.ENC_EMB_DIM, config.ENC_HID_DIM, config.DEC_HID_DIM, config.ENC_DROPOUT)\n",
    "dec = train.Decoder(len(token2idx), config.DEC_EMB_DIM, config.ENC_HID_DIM, config.DEC_HID_DIM, config.DEC_DROPOUT,\n",
    "                  attn)\n",
    "\n",
    "model = train.Seq2Seq(enc, dec, device).to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import random\n",
    "\n",
    "class Encoder(nn.Module):\n",
    "    def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout):\n",
    "        super().__init__()\n",
    "        self.embedding = nn.Embedding(input_dim, emb_dim)\n",
    "        self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional = True)\n",
    "        self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        \n",
    "    def forward(self, src): \n",
    "        '''\n",
    "        src = [src_len, batch_size]\n",
    "        '''\n",
    "        src = src.transpose(0, 1) # src = [batch_size, src_len]\n",
    "        embedded = self.dropout(self.embedding(src)).transpose(0, 1) # embedded = [src_len, batch_size, emb_dim]\n",
    "        \n",
    "        # enc_output = [src_len, batch_size, hid_dim * num_directions]\n",
    "        # enc_hidden = [n_layers * num_directions, batch_size, hid_dim]\n",
    "        enc_output, enc_hidden = self.rnn(embedded) # if h_0 is not give, it will be set 0 acquiescently\n",
    "\n",
    "        # enc_hidden is stacked [forward_1, backward_1, forward_2, backward_2, ...]\n",
    "        # enc_output are always from the last layer\n",
    "        \n",
    "        # enc_hidden [-2, :, : ] is the last of the forwards RNN \n",
    "        # enc_hidden [-1, :, : ] is the last of the backwards RNN\n",
    "        \n",
    "        # initial decoder hidden is final hidden state of the forwards and backwards \n",
    "        # encoder RNNs fed through a linear layer\n",
    "        # s = [batch_size, dec_hid_dim]\n",
    "        s = torch.tanh(self.fc(torch.cat((enc_hidden[-2,:,:], enc_hidden[-1,:,:]), dim = 1)))\n",
    "        \n",
    "        return enc_output, s"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn.functional as F\n",
    "class Attention(nn.Module):\n",
    "    def __init__(self, enc_hid_dim, dec_hid_dim):\n",
    "        super().__init__()\n",
    "        self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim, bias=False)\n",
    "        self.v = nn.Linear(dec_hid_dim, 1, bias = False)\n",
    "        \n",
    "    def forward(self, s, enc_output):\n",
    "        \n",
    "        # s = [batch_size, dec_hid_dim]\n",
    "        # enc_output = [src_len, batch_size, enc_hid_dim * 2]\n",
    "        \n",
    "        batch_size = enc_output.shape[1]\n",
    "        src_len = enc_output.shape[0]\n",
    "        \n",
    "        # repeat decoder hidden state src_len times\n",
    "        # s = [batch_size, src_len, dec_hid_dim]\n",
    "        # enc_output = [batch_size, src_len, enc_hid_dim * 2]\n",
    "        s = s.unsqueeze(1).repeat(1, src_len, 1)\n",
    "        enc_output = enc_output.transpose(0, 1)\n",
    "        \n",
    "        # energy = [batch_size, src_len, dec_hid_dim]\n",
    "        energy = torch.tanh(self.attn(torch.cat((s, enc_output), dim = 2)))\n",
    "        \n",
    "        # attention = [batch_size, src_len]\n",
    "        attention = self.v(energy).squeeze(2)\n",
    "        \n",
    "        return F.softmax(attention, dim=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
     "class Decoder(nn.Module):\n",
     "    \"\"\"Single-step GRU decoder with additive attention.\n",
     "\n",
     "    Each forward() call consumes one target token per batch element and\n",
     "    returns the vocabulary logits plus the updated hidden state.\n",
     "    \"\"\"\n",
     "    def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention):\n",
     "        super().__init__()\n",
     "        self.output_dim = output_dim\n",
     "        self.attention = attention\n",
     "        self.embedding = nn.Embedding(output_dim, emb_dim)\n",
     "        self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim)\n",
     "        self.fc_out = nn.Linear((enc_hid_dim * 2) + dec_hid_dim + emb_dim, output_dim)\n",
     "        self.dropout = nn.Dropout(dropout)\n",
     "        \n",
     "    def forward(self, dec_input, s, enc_output):\n",
     "        \"\"\"Decode one step.\n",
     "\n",
     "        Args:\n",
     "            dec_input: [batch_size] current input token indices.\n",
     "            s: [batch_size, dec_hid_dim] previous decoder hidden state.\n",
     "            enc_output: [src_len, batch_size, enc_hid_dim * 2] encoder states.\n",
     "\n",
     "        Returns:\n",
     "            (pred, hidden): logits [batch_size, output_dim] and the updated\n",
     "            hidden state [batch_size, dec_hid_dim].\n",
     "        \"\"\"\n",
     "             \n",
     "        # dec_input = [batch_size]\n",
     "        # s = [batch_size, dec_hid_dim]\n",
     "        # enc_output = [src_len, batch_size, enc_hid_dim * 2]\n",
     "        \n",
     "        dec_input = dec_input.unsqueeze(1) # dec_input = [batch_size, 1]\n",
     "        \n",
     "        embedded = self.dropout(self.embedding(dec_input)).transpose(0, 1) # embedded = [1, batch_size, emb_dim]\n",
     "        \n",
     "        # a = [batch_size, 1, src_len]  \n",
     "        a = self.attention(s, enc_output).unsqueeze(1)\n",
     "        \n",
     "        # enc_output = [batch_size, src_len, enc_hid_dim * 2]\n",
     "        enc_output = enc_output.transpose(0, 1)\n",
     "\n",
     "        # c = [1, batch_size, enc_hid_dim * 2]  (attention-weighted context)\n",
     "        c = torch.bmm(a, enc_output).transpose(0, 1)\n",
     "\n",
     "        # rnn_input = [1, batch_size, (enc_hid_dim * 2) + emb_dim]\n",
     "        rnn_input = torch.cat((embedded, c), dim = 2)\n",
     "            \n",
     "        # dec_output = [src_len(=1), batch_size, dec_hid_dim]\n",
     "        # dec_hidden = [n_layers * num_directions, batch_size, dec_hid_dim]\n",
     "        dec_output, dec_hidden = self.rnn(rnn_input, s.unsqueeze(0))\n",
     "        \n",
     "        # embedded = [batch_size, emb_dim]\n",
     "        # dec_output = [batch_size, dec_hid_dim]\n",
     "        # c = [batch_size, enc_hid_dim * 2]\n",
     "        embedded = embedded.squeeze(0)\n",
     "        dec_output = dec_output.squeeze(0)\n",
     "        c = c.squeeze(0)\n",
     "        \n",
     "        # pred = [batch_size, output_dim]\n",
     "        pred = self.fc_out(torch.cat((dec_output, c, embedded), dim = 1))\n",
     "        \n",
     "        return pred, dec_hidden.squeeze(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
     "class Seq2Seq(nn.Module):\n",
     "    \"\"\"Wire an Encoder and a Decoder together and run the full decode\n",
     "    loop with optional teacher forcing.\"\"\"\n",
     "    def __init__(self, encoder, decoder, device):\n",
     "        super().__init__()\n",
     "        self.encoder = encoder\n",
     "        self.decoder = decoder\n",
     "        self.device = device\n",
     "        \n",
     "    def forward(self, src, trg, teacher_forcing_ratio = 0.5):\n",
     "        \"\"\"Decode trg_len steps.\n",
     "\n",
     "        Args:\n",
     "            src: [src_len, batch_size] source token indices.\n",
     "            trg: [trg_len, batch_size] target token indices; trg[0] is fed\n",
     "                to the decoder as the start-of-sequence row.\n",
     "            teacher_forcing_ratio: probability of feeding the ground-truth\n",
     "                token (instead of the model's prediction) at each step.\n",
     "\n",
     "        Returns:\n",
     "            [trg_len, batch_size, output_dim] logits; row 0 stays zeros.\n",
     "        \"\"\"\n",
     "        \n",
     "        # src = [src_len, batch_size]\n",
     "        # trg = [trg_len, batch_size]\n",
     "        # teacher_forcing_ratio is probability to use teacher forcing\n",
     "        \n",
     "        batch_size = src.shape[1]\n",
     "        trg_len = trg.shape[0]\n",
     "        trg_vocab_size = self.decoder.output_dim\n",
     "        \n",
     "        # tensor to store decoder outputs\n",
     "        outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)\n",
     "        \n",
     "        # enc_output is all hidden states of the input sequence, back and forwards\n",
     "        # s is the final forward and backward hidden states, passed through a linear layer\n",
     "        enc_output, s = self.encoder(src)\n",
     "                \n",
     "        # first input to the decoder is the <sos> tokens\n",
     "        dec_input = trg[0,:]\n",
     "        \n",
     "        for t in range(1, trg_len):\n",
     "            \n",
     "            # insert dec_input token embedding, previous hidden state and all encoder hidden states\n",
     "            # receive output tensor (predictions) and new hidden state\n",
     "            dec_output, s = self.decoder(dec_input, s, enc_output)\n",
     "            \n",
     "            # place predictions in a tensor holding predictions for each token\n",
     "            outputs[t] = dec_output\n",
     "            \n",
     "            # decide if we are going to use teacher forcing or not\n",
     "            teacher_force = random.random() < teacher_forcing_ratio\n",
     "            \n",
     "            # get the highest predicted token from our predictions\n",
     "            top1 = dec_output.argmax(1) \n",
     "            \n",
     "            # if teacher forcing, use actual next token as next input\n",
     "            # if not, use predicted token\n",
     "            dec_input = trg[t] if teacher_force else top1\n",
     "\n",
     "        return outputs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Notebook-local hyper-parameters (these shadow the values in config.py).\n",
     "INPUT_DIM = len(token2idx)\n",
     "OUTPUT_DIM = len(token2idx)\n",
     "ENC_EMB_DIM = 256\n",
     "DEC_EMB_DIM = 256\n",
     "ENC_HID_DIM = 512\n",
     "DEC_HID_DIM = 512\n",
     "ENC_DROPOUT = 0.5\n",
     "DEC_DROPOUT = 0.5\n",
     "\n",
     "#device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
     "# NOTE(review): forced to CPU here; this rebinds the `device` used by the\n",
     "# cells above -- confirm that is intended before training on GPU.\n",
     "device = torch.device('cpu')\n",
     "\n",
     "# Assemble the attention-based seq2seq model from the classes defined above.\n",
     "attn = Attention(ENC_HID_DIM, DEC_HID_DIM)\n",
     "enc = Encoder(INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT)\n",
     "dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_DROPOUT, attn)\n",
     "\n",
     "model = Seq2Seq(enc, dec, device).to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Seq2Seq(\n",
       "  (encoder): Encoder(\n",
       "    (embedding): Embedding(2550, 256)\n",
       "    (rnn): GRU(256, 512, bidirectional=True)\n",
       "    (fc): Linear(in_features=1024, out_features=512, bias=True)\n",
       "    (dropout): Dropout(p=0.5, inplace=False)\n",
       "  )\n",
       "  (decoder): Decoder(\n",
       "    (attention): Attention(\n",
       "      (attn): Linear(in_features=1536, out_features=512, bias=False)\n",
       "      (v): Linear(in_features=512, out_features=1, bias=False)\n",
       "    )\n",
       "    (embedding): Embedding(2550, 256)\n",
       "    (rnn): GRU(1280, 512)\n",
       "    (fc_out): Linear(in_features=1792, out_features=2550, bias=True)\n",
       "    (dropout): Dropout(p=0.5, inplace=False)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[ 0.0000,  0.0000,  0.0000,  ...,  0.0000,  0.0000,  0.0000],\n",
      "         [ 0.0000,  0.0000,  0.0000,  ...,  0.0000,  0.0000,  0.0000],\n",
      "         [ 0.0000,  0.0000,  0.0000,  ...,  0.0000,  0.0000,  0.0000],\n",
      "         ...,\n",
      "         [ 0.0000,  0.0000,  0.0000,  ...,  0.0000,  0.0000,  0.0000],\n",
      "         [ 0.0000,  0.0000,  0.0000,  ...,  0.0000,  0.0000,  0.0000],\n",
      "         [ 0.0000,  0.0000,  0.0000,  ...,  0.0000,  0.0000,  0.0000]],\n",
      "\n",
      "        [[-0.3132,  0.4082,  0.4235,  ..., -0.2946, -0.1335,  0.2648],\n",
      "         [ 0.4460, -0.3670,  0.0162,  ...,  0.2193, -0.2497,  0.5355],\n",
      "         [ 0.3194,  0.0114, -0.3301,  ...,  0.3889, -0.2899, -0.5808],\n",
      "         ...,\n",
      "         [-0.0238,  0.2380, -0.0585,  ..., -0.0641,  0.2613,  0.0559],\n",
      "         [ 0.0228,  0.2114, -0.1447,  ...,  0.0263, -0.1958, -0.1482],\n",
      "         [-0.3702,  0.2105,  0.0446,  ..., -0.0314, -0.2882, -0.0582]],\n",
      "\n",
      "        [[ 0.1916, -0.0431,  0.1872,  ...,  0.0284, -0.1940,  0.2577],\n",
      "         [ 0.3006, -0.1008, -0.2303,  ..., -0.0645, -0.2563,  0.0866],\n",
      "         [ 0.8502, -0.1966, -0.7753,  ..., -0.4811,  0.1113,  0.2357],\n",
      "         ...,\n",
      "         [-0.1841, -0.3408, -0.1145,  ...,  0.0845,  0.0054, -0.5488],\n",
      "         [ 0.2627,  0.1843, -0.6828,  ..., -0.0328, -0.2795, -0.2341],\n",
      "         [-0.0926,  0.0839, -0.4359,  ...,  0.3045, -0.5232, -0.2856]],\n",
      "\n",
      "        ...,\n",
      "\n",
      "        [[-0.3137,  0.1374, -0.2800,  ..., -0.1871, -0.2681,  0.4749],\n",
      "         [ 0.0410,  0.2823, -0.0132,  ..., -0.1099,  0.9011, -0.3687],\n",
      "         [-0.0877, -0.1322, -0.0308,  ...,  0.2943,  0.1624,  0.0826],\n",
      "         ...,\n",
      "         [ 0.0245, -0.3428,  0.2146,  ...,  0.0872, -0.8060, -0.3310],\n",
      "         [-0.4308, -0.0322,  0.4208,  ..., -0.2487, -0.0354, -0.2379],\n",
      "         [ 0.3752,  0.0676,  0.2799,  ..., -0.2290, -0.3823, -0.2527]],\n",
      "\n",
      "        [[-0.0165,  0.2820,  0.1105,  ..., -0.0786,  0.0752,  0.2376],\n",
      "         [ 0.4404,  0.0050, -0.3564,  ..., -0.0215, -0.0599,  0.2258],\n",
      "         [ 0.5266, -0.6657,  0.0781,  ...,  0.4507,  0.5590,  0.3986],\n",
      "         ...,\n",
      "         [-0.2708,  0.0931, -0.1295,  ..., -0.3273, -0.9233, -0.3098],\n",
      "         [ 0.1201, -0.1506, -0.4863,  ..., -0.2399, -0.3824, -0.5117],\n",
      "         [ 0.3746, -0.4193, -0.3761,  ...,  0.8014, -0.0230,  0.1718]],\n",
      "\n",
      "        [[-0.8245,  0.4341, -0.1304,  ..., -0.4240, -0.3462, -0.0959],\n",
      "         [ 0.3247, -0.0836, -0.0735,  ...,  0.0208, -0.2377,  0.7391],\n",
      "         [ 0.2165,  0.2543, -0.1075,  ...,  0.2684, -0.3926, -0.4887],\n",
      "         ...,\n",
      "         [-0.2933,  0.1054, -0.0169,  ..., -0.2086, -0.0973, -0.1963],\n",
      "         [-0.2117,  0.1403, -0.5279,  ...,  0.3628, -0.2782, -0.6591],\n",
      "         [-0.0758,  0.2163, -0.3155,  ..., -0.0536, -0.1313, -0.6967]]],\n",
      "       grad_fn=<CopySlices>)\n"
     ]
    }
   ],
   "source": [
    "for q, a in dataloader:\n",
    "    out = model(q, a)\n",
    "    print (out)\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[ 0.0000,  0.0000,  0.0000,  ...,  0.0000,  0.0000,  0.0000]],\n",
       "\n",
       "        [[-0.4400,  0.0810, -0.6297,  ...,  0.2353, -0.6468, -0.5177]],\n",
       "\n",
       "        [[ 0.2102,  0.0877, -0.1790,  ..., -0.1586, -0.3182,  0.4279]],\n",
       "\n",
       "        ...,\n",
       "\n",
       "        [[-0.2088,  0.0673, -0.6844,  ...,  0.3064, -0.4840, -0.2804]],\n",
       "\n",
       "        [[ 0.3626,  0.0133, -0.3101,  ...,  0.1164, -0.3262, -0.7206]],\n",
       "\n",
       "        [[-0.0379, -0.1657, -0.1510,  ...,  0.4023, -0.2602, -0.2053]]],\n",
       "       grad_fn=<CopySlices>)"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model(torch.rand(21, 1).long(), torch.rand(21, 1).long())"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.8.13 ('py38')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "2aed9fa7d8bcea3012a6923c496fbc67499bfca4052cc7c7c55fbe40e4e4d851"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
