{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "9392065f",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/st/miniforge3/envs/tf/lib/python3.8/site-packages/torch/nn/modules/rnn.py:62: UserWarning: dropout option adds dropout after all but last recurrent layer, so non-zero dropout expects num_layers greater than 1, but got dropout=0.5 and num_layers=1\n",
      "  warnings.warn(\"dropout option adds dropout after all but last \"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2.3151235580444336\n",
      "1.7279229164123535\n",
      "1.1442941427230835\n",
      "0.6813140511512756\n",
      "0.4089007079601288\n",
      "0.2551581561565399\n",
      "0.17389316856861115\n",
      "0.12628521025180817\n",
      "0.09564514458179474\n",
      "0.07477911561727524\n"
     ]
    }
   ],
   "source": [
     "import torch\n",
     "import torch.nn as nn\n",
     "import random\n",
     "\n",
     "# Seed Python's and torch's RNGs so runs are reproducible.\n",
     "SEED = 1\n",
     "random.seed(SEED)\n",
     "torch.manual_seed(SEED)\n",
     "\n",
     "# Force deterministic cuDNN kernels (a no-op on CPU-only runs).\n",
     "torch.backends.cudnn.deterministic = True\n",
     "\n",
     "# Hyperparameters shared by the encoder and decoder below.\n",
     "src_vocab_size = 30   # source vocabulary size\n",
     "trg_vocab_size = 30   # target vocabulary size\n",
     "emd_dim = 64          # token embedding dimension\n",
     "hid_size = 128        # LSTM hidden size\n",
     "n_layers = 1          # number of stacked LSTM layers\n",
     "dropout = 0.5         # inter-layer LSTM dropout (only effective when n_layers > 1)\n",
     "\n",
    "class Enconder(nn.Module):\n",
    "    def __init__(self, vocab_size, emd_dim=64, hid_size=128, n_layers=1, dropout=0.5):\n",
    "        super().__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, emd_dim)\n",
    "        self.rnn = nn.LSTM(emd_dim, hid_size, num_layers=n_layers, dropout=dropout, batch_first=True)\n",
    "        \n",
    "    def forward(self, x):  # x [batch_size, seq_len]\n",
    "        x = self.embedding(x) # x [batch_size, seq_len, embed_dim]\n",
    "        # outputs: [batch_size, seq_len, rnn_hider]  \n",
    "        # hidden : [n_layers, batch_size, rnn_hidden]\n",
    "        # cell   : [n_layers, batch_size, rnn_hidden]\n",
    "        outputs, (hidden, cell) = self.rnn(x)\n",
    "        return hidden, cell\n",
    "\n",
    "class Deconder(nn.Module):\n",
    "    def __init__(self, vocab_size, emd_dim=64, hid_size=128, n_layers=1, dropout=0.5):\n",
    "        super().__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, emd_dim)\n",
    "        self.rnn = nn.LSTM(emd_dim, hid_size, num_layers=n_layers, dropout=dropout, batch_first=True)\n",
    "        self.out = nn.Linear(hid_size, vocab_size)\n",
    "        \n",
    "    def forward(self, x, hidden, cell):\n",
    "        # 输入一个 time step\n",
    "        # x [bacth_size, 1]  hidden:[n_layers, batch_size, rnn_hidden] cell : [n_layers, batch_size, rnn_hidden]\n",
    "        x = self.embedding(x) # x [bacth_size, 1, emd_dim]\n",
    "        # outputs: [1, seq_len, rnn_hider]  \n",
    "        # hidden : [n_layers, 1, rnn_hidden]\n",
    "        # cell   : [n_layers, 1, rnn_hidden]\n",
    "        outputs, (hidden, cell) = self.rnn(x, (hidden, cell))   \n",
    "        x = self.out(outputs) # [n_layers, 1, vocab_size]\n",
    "        return x, hidden, cell\n",
    "        \n",
    "class Seq2Seq(nn.Module):\n",
    "    \"\"\" 用于训练的seq2seq模型 \"\"\"\n",
    "    def __init__(self, encoder, decoder):\n",
    "        super().__init__()\n",
    "        self.encoder = encoder\n",
    "        self.decoder = decoder\n",
    "    \n",
    "    def forward(self, src, trg):  \n",
    "        # [n_layers, batch_size, rnn_hidden] [n_layers, batch_size, rnn_hidden]\n",
    "        hidden, cell = self.encoder(src)\n",
    "        t_outputs = []  \n",
    "        for t in range(trg.shape[1]):\n",
    "            x = trg[:, t]\n",
    "            x = x.unsqueeze(-1)\n",
    "            outputs, hidden, cell = self.decoder(x, hidden, cell)\n",
    "            t_outputs.append(outputs)\n",
    "        t_outputs = torch.cat(t_outputs, 1)  \n",
    "        return t_outputs\n",
    "    \n",
    "    def pred(self, src, max_len=10, start_index=2):\n",
    "        hidden, cell = self.encoder(src)\n",
    "        x = torch.full((src.shape[0], 1), start_index).to(torch.long)  \n",
    "        t_outputs = []\n",
    "        for t in range(max_len):  \n",
    "            outputs, hidden, cell = self.decoder(x, hidden, cell)\n",
    "            x = torch.argmax(outputs, dim=-1)  \n",
    "            t_outputs.append(x)\n",
    "        t_outputs = torch.cat(t_outputs, 1)   \n",
    "        return t_outputs   \n",
    "        \n",
    "encoder = Enconder(30, emd_dim, hid_size, n_layers, dropout)\n",
    "decoder = Deconder(30, emd_dim, hid_size, n_layers, dropout)\n",
    "model = Seq2Seq(encoder, decoder)\n",
    "\n",
    "seq2seq_p = Seq2Seq_Pred(encoder, decoder)\n",
    "\n",
    "optimizer = torch.optim.Adam(model.parameters())\n",
    "criterion = nn.CrossEntropyLoss(ignore_index=0)\n",
    "\n",
    "for i in range(10):\n",
    "    train_iter = tain_iter()\n",
    "    for i, (src, tti, tto) in enumerate(train_iter):\n",
    "        src = torch.LongTensor(src)\n",
    "        tti = torch.LongTensor(tti)\n",
    "        tto = torch.LongTensor(tto)\n",
    "        optimizer.zero_grad()\n",
    "        output = model.forward(src, tti)\n",
    "\n",
    "        tto = tto.reshape(-1)\n",
    "        output = output.reshape(-1, output.shape[-1])\n",
    "        loss = criterion(output, tto)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "    print(f\"{loss}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7149d90e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "709b7977",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "data_path = \"letters_source.txt\"\n",
    "target_path = \"letters_target.txt\"\n",
    "with open(data_path, 'r') as f:\n",
    "    data_s = f.read().split('\\n')\n",
    "    \n",
    "with open(target_path, 'r') as f:\n",
    "    target_s = f.read().split('\\n')\n",
    "    \n",
    "data_chars = sorted(list(set(\"\".join(data_s))))\n",
    "taget_chars = sorted(list(set(\"\".join(target_s))))\n",
    "pad_flage = '<PAD>'\n",
    "unk_flage = '<UNK>'\n",
    "beg_flage = '<GO>'\n",
    "end_flage = '<EOS>'\n",
    "data_chars = [pad_flage, unk_flage, beg_flage, end_flage] + data_chars\n",
    "taget_chars = [pad_flage, unk_flage, beg_flage, end_flage] + taget_chars\n",
    "\n",
    "data_c2i = {char: i for i, char in enumerate(data_chars)}\n",
    "taget_c2i = {char: i for i, char in enumerate(taget_chars)}\n",
    "taget_i2c = {i: char for i, char in enumerate(taget_chars)}\n",
    "\n",
    "max_len = 10\n",
    "\n",
    "data = []\n",
    "for line_ in data_s:\n",
    "    line = [data_c2i.get(char, 1) for char in  line_]\n",
    "    line = line[:max_len]\n",
    "    line = line + [0]*(max_len - len(line))\n",
    "    data.append(line)\n",
    "target_i = []\n",
    "target_o = []\n",
    "for line_ in target_s:\n",
    "    line = [2] + [taget_c2i.get(char, 1) for char in  line_] + [3]\n",
    "    line = line[:max_len]\n",
    "    line = line + [0]*(max_len - len(line))\n",
    "    \n",
    "    target_i.append(line)\n",
    "    line = [taget_c2i.get(char, 1) for char in  line_] + [3]\n",
    "    line = line[:max_len]\n",
    "    line = line + [0]*(max_len - len(line))\n",
    "    target_o.append(line)\n",
    "    \n",
    "    \n",
    "data = np.array(data)\n",
    "target_i = np.array(target_i)\n",
    "target_o = np.array(target_o)\n",
    "\n",
    "split_index = int(len(data)*0.7)\n",
    "train_data = data[:split_index]\n",
    "train_target_i = target_i[:split_index]\n",
    "train_target_o = target_o[:split_index]\n",
    "\n",
    "test_data = data[split_index:]\n",
    "target_data = target_o[split_index:]\n",
    "\n",
    "def tain_iter(batch_size=128):\n",
    "    num = len(train_data)//batch_size\n",
    "    for i in range(num):\n",
    "        beg_index = i*batch_size\n",
    "        end_index = (i+1)*batch_size\n",
    "        td = train_data[beg_index: end_index]\n",
    "        tti = train_target_i[beg_index: end_index]\n",
    "        tto = train_target_o[beg_index: end_index]\n",
    "        yield td, tti, tto\n",
    "\n",
    "def test_iter(batch_size=128):\n",
    "    num = len(test_data)//batch_size\n",
    "    for i in range(num):\n",
    "        beg_index = i*batch_size\n",
    "        end_index = (i+1)*batch_size\n",
    "\n",
    "        yield test_data[beg_index: end_index], target_data[beg_index: end_index]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6d5dff78",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
