{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "1453eba8",
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch import nn\n",
    "import torch \n",
    "from torch.utils.data import DataLoader\n",
    "from torch.nn.functional import cross_entropy,softmax\n",
    "\n",
    "class Seq2Seq(nn.Module):\n",
    "    \"\"\"Encoder-decoder sequence model with attention.\n",
    "\n",
    "    A single-layer LSTM encodes the source sequence; an LSTMCell decodes\n",
    "    step by step, concatenating its hidden state with an attention context\n",
    "    vector before projecting to the target vocabulary.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, enc_v_dim, dec_v_dim, emb_dim, hidden_size, max_pred_len, start_token, end_token):\n",
    "        super().__init__()\n",
    "        self.hidden_size = hidden_size\n",
    "        self.dec_v_dim = dec_v_dim\n",
    "        self.max_pred_len = max_pred_len\n",
    "        self.start_token = start_token\n",
    "        self.end_token = end_token\n",
    "\n",
    "        # encoder\n",
    "        self.enc_embeddings = nn.Embedding(enc_v_dim, emb_dim)\n",
    "        self.enc_embeddings.weight.data.normal_(0, 0.1)\n",
    "        self.encoder = nn.LSTM(emb_dim, hidden_size, 1, batch_first=True)\n",
    "\n",
    "        # decoder\n",
    "        self.dec_embeddings = nn.Embedding(dec_v_dim, emb_dim)\n",
    "        self.dec_embeddings.weight.data.normal_(0, 0.1)  # same init as encoder embeddings\n",
    "        self.attn = nn.Linear(hidden_size, hidden_size)\n",
    "        self.decoder_cell = nn.LSTMCell(emb_dim, hidden_size)\n",
    "        self.decoder_dense = nn.Linear(hidden_size * 2, dec_v_dim)\n",
    "\n",
    "        self.opt = torch.optim.Adam(self.parameters(), lr=0.001)\n",
    "\n",
    "    def encode(self, x):\n",
    "        \"\"\"Encode source token ids x [batch_size, seq_len].\n",
    "\n",
    "        Returns (o, h, c): encoder outputs [batch_size, seq_len, hidden_size]\n",
    "        and the final LSTM states, each [1, batch_size, hidden_size].\n",
    "        \"\"\"\n",
    "        embedded = self.enc_embeddings(x)   # [batch_size, seq_len, emb_dim]\n",
    "        # explicit zero state, created on the input's device for robustness\n",
    "        hidden = (torch.zeros(1, x.shape[0], self.hidden_size, device=x.device),\n",
    "                  torch.zeros(1, x.shape[0], self.hidden_size, device=x.device))\n",
    "        o, (h, c) = self.encoder(embedded, hidden)\n",
    "        return o, h, c\n",
    "\n",
    "    def _attend(self, hx, o):\n",
    "        \"\"\"Attention context for decoder state hx [batch_size, hidden_size]\n",
    "        over encoder outputs o [batch_size, seq_len, hidden_size].\n",
    "        Returns the context vector [batch_size, 1, hidden_size].\n",
    "        \"\"\"\n",
    "        score = torch.matmul(self.attn(hx.unsqueeze(1)), o.permute(0, 2, 1))  # [batch_size, 1, seq_len]\n",
    "        weights = softmax(score, dim=2)  # attention distribution over source positions\n",
    "        return torch.matmul(weights, o)  # weighted sum of encoder outputs\n",
    "\n",
    "    def inference(self, x, return_align=False):\n",
    "        \"\"\"Greedily decode max_pred_len tokens for source batch x.\n",
    "\n",
    "        Args:\n",
    "            x: LongTensor [batch_size, seq_len] of source token ids.\n",
    "            return_align: unused; kept for interface compatibility.\n",
    "        Returns:\n",
    "            LongTensor [batch_size, max_pred_len] of predicted token ids.\n",
    "        \"\"\"\n",
    "        self.eval()\n",
    "        with torch.no_grad():  # no graph needed during greedy decoding\n",
    "            o, hx, cx = self.encode(x)\n",
    "            hx, cx = hx[0], cx[0]       # [batch_size, hidden_size]\n",
    "            # every sequence starts from the start token\n",
    "            start = torch.full((x.shape[0], 1), self.start_token, dtype=torch.long, device=x.device)\n",
    "            dec_in = self.dec_embeddings(start).squeeze(1)   # [batch_size, emb_dim]\n",
    "            output = []\n",
    "            for _ in range(self.max_pred_len):\n",
    "                context = self._attend(hx, o)                # [batch_size, 1, hidden_size]\n",
    "                hx, cx = self.decoder_cell(dec_in, (hx, cx))\n",
    "                ha = torch.cat([context.squeeze(1), hx], dim=1)   # [batch_size, hidden_size*2]\n",
    "                result = self.decoder_dense(ha).argmax(dim=1).view(-1, 1)\n",
    "                dec_in = self.dec_embeddings(result).squeeze(1)  # feed prediction back in\n",
    "                output.append(result)\n",
    "        self.train()\n",
    "        # stack -> [max_pred_len, batch_size, 1]; use reshape (not view) because the\n",
    "        # permuted tensor is non-contiguous, which made .view fail for batch_size > 1\n",
    "        return torch.stack(output, dim=0).permute(1, 0, 2).reshape(-1, self.max_pred_len)\n",
    "\n",
    "    def train_logit(self, x, y):\n",
    "        \"\"\"Teacher-forced decoder logits.\n",
    "\n",
    "        Args:\n",
    "            x: LongTensor [batch_size, src_len] source token ids.\n",
    "            y: LongTensor [batch_size, tgt_len] target ids incl. start/end tokens.\n",
    "        Returns:\n",
    "            Logits [batch_size, tgt_len - 1, dec_v_dim].\n",
    "        \"\"\"\n",
    "        o, hx, cx = self.encode(x)\n",
    "        hx, cx = hx[0], cx[0]           # [batch_size, hidden_size]\n",
    "        # decoder inputs are the targets without the final token\n",
    "        dec_emb_in = self.dec_embeddings(y[:, :-1]).permute(1, 0, 2)  # [tgt_len-1, batch_size, emb_dim]\n",
    "        output = []\n",
    "        for step_emb in dec_emb_in:\n",
    "            context = self._attend(hx, o)                # [batch_size, 1, hidden_size]\n",
    "            hx, cx = self.decoder_cell(step_emb, (hx, cx))\n",
    "            ha = torch.cat([context.squeeze(1), hx], dim=1)   # [batch_size, hidden_size*2]\n",
    "            output.append(self.decoder_dense(ha))        # [batch_size, dec_v_dim]\n",
    "        return torch.stack(output, dim=0).permute(1, 0, 2)  # [batch_size, tgt_len-1, dec_v_dim]\n",
    "\n",
    "    def step(self, x, y):\n",
    "        \"\"\"Run one optimization step; returns the loss as a numpy scalar.\"\"\"\n",
    "        self.opt.zero_grad()\n",
    "        logit = self.train_logit(x, y)\n",
    "        dec_out = y[:, 1:]              # targets are the decoder inputs shifted by one\n",
    "        loss = cross_entropy(logit.reshape(-1, self.dec_v_dim), dec_out.reshape(-1))\n",
    "        loss.backward()\n",
    "        self.opt.step()\n",
    "        return loss.detach().numpy()\n",
    "\n",
    "# dataset = utils.DateData(4000)\n",
    "\n",
    "# loader = DataLoader(\n",
    "#     dataset,\n",
    "#     batch_size=32,\n",
    "#     shuffle=True\n",
    "# )\n",
    "\n",
    "# model = Seq2Seq(\n",
    "#     dataset.num_word,\n",
    "#     dataset.num_word,\n",
    "#     emb_dim=16,\n",
    "#     hidden_size=32,\n",
    "#     max_pred_len=11,\n",
    "#     start_token=dataset.start_token,\n",
    "#     end_token=dataset.end_token\n",
    "# )\n",
    "\n",
    "# def train():\n",
    "#     for i in range(100):\n",
    "#         for batch_idx , batch in enumerate(loader):\n",
    "#             bx, by, _ = batch\n",
    "#             loss = model.step(bx,by)\n",
    "#             if batch_idx % 70 == 0:\n",
    "#                 target = dataset.idx2str(by[0, 1:-1].data.numpy())\n",
    "#                 pred = model.inference(bx[0:1])\n",
    "#                 res = dataset.idx2str(pred[0].data.numpy())\n",
    "#                 src = dataset.idx2str(bx[0].data.numpy())\n",
    "#                 print(\n",
    "#                     \"Epoch: \",i,\n",
    "#                     \"| t: \", batch_idx,\n",
    "#                     \"| loss: %.3f\" % loss,\n",
    "#                     \"| input: \", src,\n",
    "#                     \"| target: \", target,\n",
    "#                     \"| inference: \", res,\n",
    "#                 )\n",
    "#     # pkl_data = {\"i2v\": dataset.i2v, \"x\": dataset.x[:6], \"y\": dataset.y[:6], \"align\": model.inference(dataset.x[:6], return_align=True)}\n",
    "\n",
    "#     # with open(\"./visual/tmp/attention_align.pkl\", \"wb\") as f:\n",
    "#     #     pickle.dump(pkl_data, f)\n",
    "\n",
    "# if __name__ == \"__main__\":\n",
    "#     train()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "eef755c0",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "054c0a2d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "03479f0e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d4563229",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
