{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "70a34f13",
   "metadata": {},
   "outputs": [],
   "source": [
    "from io import open\n",
    "# 使用正则表达式辅助子目录查询\n",
    "import glob\n",
    "import os\n",
    "# 用于获得常见字符及字符规范化\n",
    "import string\n",
    "import unicodedata\n",
    "import random\n",
    "import time\n",
    "import math\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "%matplotlib inline\n",
    "import matplotlib.pyplot as plt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f53c6e19",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. 对data文件中的数据进行预处理\n",
    "all_letters = string.ascii_letters + \" .,;'\"\n",
    "n_letters = len(all_letters)\n",
    "n_letters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c92767f1",
   "metadata": {},
   "outputs": [],
   "source": [
    "def unicode2ascii(s):\n",
    "    '''\n",
    "    去掉语言中的一些重音\n",
    "    '''\n",
    "    return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' and c in all_letters)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c656fdca",
   "metadata": {},
   "outputs": [],
   "source": [
    "data_path = 'data/names/'\n",
    "\n",
    "def read_lines(file_name):\n",
    "    '''\n",
    "    Read a UTF-8 text file and return its lines converted to ASCII\n",
    "    via unicode2ascii.\n",
    "    '''\n",
    "    # Use a context manager so the file handle is closed deterministically\n",
    "    # (the original leaked the handle and relied on GC to close it).\n",
    "    with open(file_name, encoding='utf-8') as f:\n",
    "        lines = f.read().strip().split('\\n')\n",
    "    return [unicode2ascii(line) for line in lines]\n",
    "\n",
    "file_name = data_path + 'Chinese.txt'\n",
    "result = read_lines(file_name)\n",
    "print(result[0:5])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6e650a39",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the category -> name-list mapping from every data/names/*.txt file.\n",
    "category_lines = {}\n",
    "all_categories = []\n",
    "\n",
    "for file_name in glob.glob(data_path+'*.txt'):\n",
    "    # The language/category name is the file name without its extension.\n",
    "    category = os.path.splitext(os.path.basename(file_name))[0]\n",
    "    all_categories.append(category)\n",
    "    lines = read_lines(file_name)\n",
    "    category_lines[category] = lines\n",
    "    \n",
    "n_categories = len(all_categories)\n",
    "print(n_categories)\n",
    "\n",
    "print(category_lines['Italian'][:5])\n",
    "\n",
    "print(len(category_lines['Chinese']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2c09fc34",
   "metadata": {},
   "outputs": [],
   "source": [
    "def line2tensor(line):\n",
    "    '''\n",
    "    Convert a name into a one-hot tensor of shape (len(line), 1, n_letters);\n",
    "    line is the input name.\n",
    "    '''\n",
    "    tensor = torch.zeros(len(line), 1, n_letters)\n",
    "    for li, letter in enumerate(line):\n",
    "        idx = all_letters.find(letter)\n",
    "        # str.find returns -1 for characters outside all_letters, which would\n",
    "        # silently set the LAST column; leave such rows all-zero instead.\n",
    "        if idx != -1:\n",
    "            tensor[li][0][idx] = 1\n",
    "    \n",
    "    return tensor\n",
    "\n",
    "line = 'Bai'\n",
    "line_tensor = line2tensor(line)\n",
    "line_tensor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1dfbdf65",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 构建RNN模型\n",
    "class RNN(nn.Module):\n",
    "    def __init__(self, input_size, hidden_size, output_size, num_layers=1):\n",
    "        super(RNN, self).__init__()\n",
    "        self.hidden_size = hidden_size\n",
    "        self.num_layers = num_layers\n",
    "        self.rnn = nn.RNN(input_size, hidden_size, num_layers)\n",
    "        self.linear = nn.Linear(hidden_size, output_size)  # 将nn.RNN的输出维度转化为指定的输出维度\n",
    "        self.softmax = nn.LogSoftmax(dim=-1)\n",
    "        \n",
    "    def forward(self, x, hidden):\n",
    "        '''\n",
    "        x.shape: 1*n_letters\n",
    "        '''\n",
    "        x = x.unsqueeze(0)\n",
    "        y, hn = self.rnn(x, hidden)\n",
    "        return self.softmax(self.linear(y)), hn\n",
    "    \n",
    "    def init_hidden(self):\n",
    "        return torch.zeros(self.num_layers, 1, self.hidden_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2b00329b",
   "metadata": {},
   "outputs": [],
   "source": [
    "class LSTM(nn.Module):\n",
    "    def __init__(self, input_size, hidden_size, output_size, num_layers=1):\n",
    "        super(LSTM, self).__init__()\n",
    "        self.hidden_size = hidden_size\n",
    "        self.num_layers = num_layers\n",
    "        \n",
    "        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)\n",
    "        self.linear = nn.Linear(hidden_size, output_size)\n",
    "        self.softmax = nn.LogSoftmax(dim=-1)\n",
    "        \n",
    "    def forward(self, x, hidden, c):\n",
    "        x = x.unsqueeze(0)\n",
    "        y, (hn, c) = self.lstm(x, (hidden,c))\n",
    "        return self.softmax(self.linear(y)), hn, c\n",
    "    \n",
    "    def init_hidden_c(self):\n",
    "        c = hidden = torch.zeros(self.num_layers, 1, self.hidden_size)\n",
    "        return hidden, c"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "62a1f624",
   "metadata": {},
   "outputs": [],
   "source": [
    "class GRU(nn.Module):\n",
    "    def __init__(self, input_size, hidden_size, output_size, num_layers=1):\n",
    "        super(GRU, self).__init__()\n",
    "        self.hidden_size = hidden_size\n",
    "        self.num_layers = num_layers\n",
    "        self.gru = nn.GRU(input_size, hidden_size, num_layers)\n",
    "        self.linear = nn.Linear(hidden_size, output_size)  # 将nn.RNN的输出维度转化为指定的输出维度\n",
    "        self.softmax = nn.LogSoftmax(dim=-1)\n",
    "        \n",
    "    def forward(self, x, hidden):\n",
    "        '''\n",
    "        x.shape: 1*n_letters\n",
    "        '''\n",
    "        x = x.unsqueeze(0)\n",
    "        y, hn = self.gru(x, hidden)\n",
    "        return self.softmax(self.linear(y)), hn\n",
    "    \n",
    "    def init_hidden(self):\n",
    "        return torch.zeros(self.num_layers, 1, self.hidden_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f5bd3c19",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shared hyperparameters for all three models.\n",
    "input_size = n_letters\n",
    "n_hidden = 128\n",
    "output_size = n_categories\n",
    "\n",
    "# Use the named hyperparameters (the original defined input_size and\n",
    "# output_size but then bypassed them).\n",
    "rnn = RNN(input_size, n_hidden, output_size)\n",
    "lstm = LSTM(input_size, n_hidden, output_size)\n",
    "gru = GRU(input_size, n_hidden, output_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "331e121c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 测试\n",
    "x = line2tensor('B').squeeze(0)\n",
    "hidden = c = torch.zeros(1, 1, n_hidden)\n",
    "\n",
    "rnn_out, next_hidden = rnn(x, hidden)\n",
    "print(rnn_out)\n",
    "\n",
    "lstm_out, next_hidden, c = lstm(x, hidden, c)\n",
    "print(lstm_out)\n",
    "\n",
    "gru_out, next_hidden = gru(x, hidden)\n",
    "print(gru_out)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "35483bc7",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_category(output):\n",
    "    '''\n",
    "    Map a model output (log-probabilities) to (category_name, category_index).\n",
    "    '''\n",
    "    _, top_index = output.topk(1)  # index of the highest-scoring category\n",
    "    category_i = top_index[0].item()\n",
    "    return all_categories[category_i], category_i\n",
    "\n",
    "# Quick check\n",
    "get_category(gru_out)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "934731f2",
   "metadata": {},
   "outputs": [],
   "source": [
    "def random_training_sample():\n",
    "    '''\n",
    "    Draw a random (category, name) training pair and return it together\n",
    "    with its tensor representations.\n",
    "    '''\n",
    "    category = random.choice(all_categories)\n",
    "    line = random.choice(category_lines[category])\n",
    "    category_tensor = torch.tensor([all_categories.index(category)], dtype=torch.long)  # category name -> class index\n",
    "    line_tensor = line2tensor(line)\n",
    "    return category, line, category_tensor, line_tensor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "34148263",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 构建训练函数并进行训练\n",
    "\n",
    "criterion = nn.NLLLoss()  # NLLLoss与nn.LogSoftmax逻辑匹配\n",
    "learning_rate = 0.005\n",
    "\n",
    "def train_rnn(category_tensor, line_tensor):\n",
    "    hidden = rnn.init_hidden()\n",
    "    rnn.zero_grad()\n",
    "    \n",
    "    # 将训练数据line_tensor的每个字符逐个传入rnn之中，得到最终结果\n",
    "    for i in range(line_tensor.size()[0]):\n",
    "        output, hidden = rnn(line_tensor[i], hidden)\n",
    "    \n",
    "    loss = criterion(output.squeeze(0), category_tensor)\n",
    "    loss.backward()\n",
    "    \n",
    "    for p in rnn.parameters():\n",
    "        p.data.add_(-learning_rate, p.grad.data)\n",
    "        \n",
    "    return output, loss.item()\n",
    "\n",
    "def train_lstm(category_tensor, line_tensor):\n",
    "    hidden, c = lstm.init_hidden_c()\n",
    "    lstm.zero_grad()\n",
    "    \n",
    "    # 将训练数据line_tensor的每个字符逐个传入rnn之中，得到最终结果\n",
    "    for i in range(line_tensor.size()[0]):\n",
    "        output, hidden, c = lstm(line_tensor[i], hidden, c)\n",
    "    \n",
    "    loss = criterion(output.squeeze(0), category_tensor)\n",
    "    loss.backward()\n",
    "    \n",
    "    for p in lstm.parameters():\n",
    "        p.data.add_(-learning_rate, p.grad.data)\n",
    "        \n",
    "    return output, loss.item()\n",
    "\n",
    "def train_gru(category_tensor, line_tensor):\n",
    "    hidden = gru.init_hidden()\n",
    "    gru.zero_grad()\n",
    "    \n",
    "    # 将训练数据line_tensor的每个字符逐个传入rnn之中，得到最终结果\n",
    "    for i in range(line_tensor.size()[0]):\n",
    "        output, hidden = gru(line_tensor[i], hidden)\n",
    "    \n",
    "    loss = criterion(output.squeeze(0), category_tensor)\n",
    "    loss.backward()\n",
    "    \n",
    "    for p in gru.parameters():\n",
    "        p.data.add_(-learning_rate, p.grad.data)\n",
    "        \n",
    "    return output, loss.item()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ed282044",
   "metadata": {},
   "outputs": [],
   "source": [
    "def time_spent(start_time):\n",
    "    '''\n",
    "    计算clock time\n",
    "    '''\n",
    "    now = time.time()\n",
    "    s = now-start_time\n",
    "    m = s//60\n",
    "    s = s%60\n",
    "    return f'{m}min, {s}sec'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5128cdbb",
   "metadata": {},
   "outputs": [],
   "source": [
    "n_iters = 60000\n",
    "print_every = 50\n",
    "plot_every = 10\n",
    "\n",
    "def train(train_fn):\n",
    "    '''\n",
    "    Run n_iters training iterations with train_fn and return\n",
    "    (losses averaged every plot_every iterations, elapsed seconds).\n",
    "    '''\n",
    "    all_losses = []\n",
    "    start = time.time()\n",
    "    current_loss = 0\n",
    "    # 'iteration' (not 'iter') so the builtin iter() is not shadowed.\n",
    "    for iteration in range(1, n_iters+1):\n",
    "        category, line, category_tensor, line_tensor = random_training_sample()\n",
    "        output, loss = train_fn(category_tensor, line_tensor)\n",
    "        current_loss += loss\n",
    "        if iteration%print_every == 0:\n",
    "            guess, guess_i = get_category(output)\n",
    "            correct = 'True' if guess == category else f'False {category}'\n",
    "            print(f'{time_spent(start)}--{iteration/n_iters*100}%--{iteration}: loss: {loss}, name: {line}, predict: {guess}, {correct}')\n",
    "        \n",
    "        if iteration%plot_every == 0:\n",
    "            all_losses.append(current_loss/plot_every)\n",
    "            current_loss = 0\n",
    "    return all_losses, int(time.time() - start)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e8f2865a",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# start training\n",
    "rnn_loss, rnn_time = train(train_rnn)\n",
    "lstm_loss, lstm_time = train(train_lstm)\n",
    "gru_loss, gru_time = train(train_gru)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9102cb93",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 绘制损失图\n",
    "plt.figure(0)\n",
    "plt.plot(rnn_loss[:300], color='green', label='RNN')\n",
    "plt.plot(lstm_loss[:300], color='red', label='LSTM')\n",
    "plt.plot(gru_loss[:300], color='blue', label='GRU')\n",
    "plt.legend()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ec1a61fa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 绘制耗时图\n",
    "plt.figure(1)\n",
    "x_data = ['RNN', 'LSTM', 'GRU']\n",
    "y_data = [rnn_time, lstm_time, gru_time]\n",
    "plt.bar(range(len(x_data)), y_data, tick_label=x_data)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9906457b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 构建评估函数\n",
    "def evl_rnn(line_tensor):\n",
    "    hidden = rnn.init_hidden()\n",
    "    \n",
    "    # 将训练数据line_tensor的每个字符逐个传入rnn之中，得到最终结果\n",
    "    for i in range(line_tensor.size()[0]):\n",
    "        output, hidden = rnn(line_tensor[i], hidden)\n",
    "        \n",
    "    return output.squeeze(0)\n",
    "\n",
    "def evl_lstm(line_tensor):\n",
    "    hidden, c = lstm.init_hidden_c()\n",
    "    for i in range(line_tensor.size()[0]):\n",
    "        output, hidden, c = lstm(line_tensor[i], hidden, c)\n",
    "    return output.squeeze(0)\n",
    "\n",
    "def evl_gru(line_tensor):\n",
    "    hidden = gru.init_hidden()\n",
    "    for i in range(line_tensor.size()[0]):\n",
    "        output, hidden = gru(line_tensor[i], hidden)\n",
    "    return output.squeeze(0)\n",
    "\n",
    "# 测试\n",
    "line = 'Bai'\n",
    "line_tensor = line2tensor(line)\n",
    "\n",
    "\n",
    "rnn_output = evl_rnn(line_tensor)\n",
    "lstm_output = evl_lstm(line_tensor)\n",
    "gru_output = evl_gru(line_tensor)\n",
    "\n",
    "rnn_output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "062b2bd2",
   "metadata": {},
   "outputs": [],
   "source": [
    "def predict(input_line, evl_fn, n_predictions=3):\n",
    "    '''\n",
    "    Print and return the top n_predictions [log_prob, category] pairs\n",
    "    for input_line, scored by the evaluation function evl_fn.\n",
    "    '''\n",
    "    print(f'{input_line}')\n",
    "    \n",
    "    with torch.no_grad():\n",
    "        output = evl_fn(line2tensor(input_line))\n",
    "        topv, topi = output.topk(n_predictions, 1, True)\n",
    "        predictions = []\n",
    "        for i in range(n_predictions):\n",
    "            value = topv[0][i].item()\n",
    "            category_index = topi[0][i].item()\n",
    "            category = all_categories[category_index]\n",
    "            print(f'({value}) {category}')\n",
    "            predictions.append([value, category])\n",
    "    # Fix: the list was built but never returned in the original.\n",
    "    return predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a152be87",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predict a few names with each model's evaluation function\n",
    "for fn in [evl_rnn, evl_lstm, evl_gru]:\n",
    "    print('-'*18)\n",
    "    predict('Dovesky', fn)\n",
    "    predict('Jackson', fn)\n",
    "    predict('Satoshi', fn)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "85d59748",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torchX",
   "language": "python",
   "name": "torchx"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
