{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 第一步: 导入必备的工具包."
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:33:10.345025Z",
     "start_time": "2025-09-03T15:33:10.330574Z"
    }
   },
   "source": [
    "# 从io中导入文件打开方法\n",
    "from io import open\n",
    "# 帮助使用正则表达式进行子目录的查询\n",
    "import glob\n",
    "import os\n",
    "# 用于获得常见字母及字符规范化\n",
    "import string\n",
    "import unicodedata\n",
    "# 导入随机工具random\n",
    "import random\n",
    "# 导入时间和数学工具包\n",
    "import time\n",
    "import math\n",
    "# 导入torch工具\n",
    "import torch\n",
    "# 导入nn准备构建模型\n",
    "import torch.nn as nn\n",
    "# 引入制图工具包        \n",
    "import matplotlib.pyplot as plt"
   ],
   "outputs": [
    {
     "ename": "ModuleNotFoundError",
     "evalue": "No module named 'torch'",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mModuleNotFoundError\u001B[0m                       Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[2], line 15\u001B[0m\n\u001B[0;32m     13\u001B[0m \u001B[38;5;28;01mimport\u001B[39;00m \u001B[38;5;21;01mmath\u001B[39;00m\n\u001B[0;32m     14\u001B[0m \u001B[38;5;66;03m# 导入torch工具\u001B[39;00m\n\u001B[1;32m---> 15\u001B[0m \u001B[38;5;28;01mimport\u001B[39;00m \u001B[38;5;21;01mtorch\u001B[39;00m\n\u001B[0;32m     16\u001B[0m \u001B[38;5;66;03m# 导入nn准备构建模型\u001B[39;00m\n\u001B[0;32m     17\u001B[0m \u001B[38;5;28;01mimport\u001B[39;00m \u001B[38;5;21;01mtorch\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mnn\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m \u001B[38;5;21;01mnn\u001B[39;00m\n",
      "\u001B[1;31mModuleNotFoundError\u001B[0m: No module named 'torch'"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 第二步: 对data文件中的数据进行处理，满足训练要求."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 获取常用的字符数量"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:33:17.820231Z",
     "start_time": "2025-09-03T15:33:17.817074Z"
    }
   },
   "source": [
     "# All usable characters: ASCII letters plus a few common punctuation marks.\n",
     "all_letters = string.ascii_letters + \" .,;'\"\n",
     "\n",
     "# Number of distinct characters (this is also the one-hot vector length).\n",
     "n_letters = len(all_letters)\n",
     "\n",
     "print(\"n_letter:\", n_letters)"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "n_letter: 57\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 字符规范化之unicode转Ascii函数"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:33:20.754078Z",
     "start_time": "2025-09-03T15:33:20.751248Z"
    }
   },
   "source": [
    "# 关于编码问题我们暂且不去考虑\n",
    "# 我们认为这个函数的作用就是去掉一些语言中的重音标记\n",
    "# 如: Ślusàrski ---> Slusarski\n",
    "def unicodeToAscii(s):\n",
    "    # normalize() 第一个参数指定字符串标准化的方式。 NFC表示字符应该是整体组成(比如可能的话就使用单一编码)，而NFD表示字符应该分解为多个组合字符表示。\n",
    "    # Python同样支持扩展的标准化形式NFKC和NFKD，它们在处理某些字符的时候增加了额外的兼容特性。\n",
    "    return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' and c in all_letters)"
   ],
   "outputs": [],
   "execution_count": 4
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:33:23.852098Z",
     "start_time": "2025-09-03T15:33:23.846243Z"
    }
   },
   "source": [
     "## Sanity check: the accented Polish name should come out as plain ASCII.\n",
     "s = \"Ślusàrski\"\n",
     "a = unicodeToAscii(s)\n",
     "print(a)"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Slusarski\n"
     ]
    }
   ],
   "execution_count": 5
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建一个从持久化文件中读取内容到内存的函数"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:33:41.276281Z",
     "start_time": "2025-09-03T15:33:41.269733Z"
    }
   },
   "source": [
    "data_path = \"./data/names/\"\n",
    "\n",
    "def readLines(filename):\n",
    "    \"\"\"从文件中读取每一行加载到内存中形成列表\"\"\"\n",
    "    # 打开指定文件并读取所有内容, 使用strip()去除两侧空白符, 然后以'\\n'进行切分\n",
    "    lines = open(filename, encoding='utf-8').read().strip().split('\\n')\n",
    "    # 对应每一个lines列表中的名字进行Ascii转换, 使其规范化.最后返回一个名字列表\n",
    "    return [unicodeToAscii(line) for line in lines]"
   ],
   "outputs": [],
   "execution_count": 7
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:33:43.785570Z",
     "start_time": "2025-09-03T15:33:43.765901Z"
    }
   },
   "source": [
     "# Sanity check on one concrete file from the dataset: Chinese.txt.\n",
     "filename = data_path + \"Chinese.txt\"\n",
     "lines = readLines(filename)\n",
     "print(lines)"
   ],
   "outputs": [
    {
     "ename": "FileNotFoundError",
     "evalue": "[Errno 2] No such file or directory: './data/names/Chinese.txt'",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mFileNotFoundError\u001B[0m                         Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[8], line 4\u001B[0m\n\u001B[0;32m      1\u001B[0m \u001B[38;5;66;03m# 调用验证\u001B[39;00m\n\u001B[0;32m      2\u001B[0m \u001B[38;5;66;03m# filename是数据集中某个具体的文件, 我们这里选择Chinese.txt\u001B[39;00m\n\u001B[0;32m      3\u001B[0m filename \u001B[38;5;241m=\u001B[39m data_path \u001B[38;5;241m+\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mChinese.txt\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m----> 4\u001B[0m lines \u001B[38;5;241m=\u001B[39m readLines(filename)\n\u001B[0;32m      5\u001B[0m \u001B[38;5;28mprint\u001B[39m(lines)\n",
      "Cell \u001B[1;32mIn[7], line 6\u001B[0m, in \u001B[0;36mreadLines\u001B[1;34m(filename)\u001B[0m\n\u001B[0;32m      4\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"从文件中读取每一行加载到内存中形成列表\"\"\"\u001B[39;00m\n\u001B[0;32m      5\u001B[0m \u001B[38;5;66;03m# 打开指定文件并读取所有内容, 使用strip()去除两侧空白符, 然后以'\\n'进行切分\u001B[39;00m\n\u001B[1;32m----> 6\u001B[0m lines \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mopen\u001B[39m(filename, encoding\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mutf-8\u001B[39m\u001B[38;5;124m'\u001B[39m)\u001B[38;5;241m.\u001B[39mread()\u001B[38;5;241m.\u001B[39mstrip()\u001B[38;5;241m.\u001B[39msplit(\u001B[38;5;124m'\u001B[39m\u001B[38;5;130;01m\\n\u001B[39;00m\u001B[38;5;124m'\u001B[39m)\n\u001B[0;32m      7\u001B[0m \u001B[38;5;66;03m# 对应每一个lines列表中的名字进行Ascii转换, 使其规范化.最后返回一个名字列表\u001B[39;00m\n\u001B[0;32m      8\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m [unicodeToAscii(line) \u001B[38;5;28;01mfor\u001B[39;00m line \u001B[38;5;129;01min\u001B[39;00m lines]\n",
      "\u001B[1;31mFileNotFoundError\u001B[0m: [Errno 2] No such file or directory: './data/names/Chinese.txt'"
     ]
    }
   ],
   "execution_count": 8
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建人名类别（所属的语言）列表与人名对应关系字典"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:33:55.048732Z",
     "start_time": "2025-09-03T15:33:55.034142Z"
    }
   },
   "source": [
    "# 构建的category_lines形如：{\"English\":[\"Lily\", \"Susan\", \"Kobe\"], \"Chinese\":[\"Zhang San\", \"Xiao Ming\"]}\n",
    "category_lines = {}\n",
    "\n",
    "# all_categories形如： [\"English\",...,\"Chinese\"]\n",
    "all_categories = []\n",
    "\n",
    "# 读取指定路径下的txt文件， 使用glob，path中可以使用正则表达式\n",
    "# glob资料：https://blog.csdn.net/qq_17753903/article/details/82180227\n",
    "for filename in glob.glob(data_path + '*.txt'):\n",
    "    # 获取每个文件的文件名, 就是对应的名字类别\n",
    "    # os.path.basename():返回path最后的文件名。\n",
    "    # 如果path以／或\\结尾，那么就会返回空值,\n",
    "    # 等同于os.path.split(path)的第二个元素。\n",
    "#     >>> import os\n",
    "#     >>> path = '/Users/houxiaojun/Data/data.csv'\n",
    "#     >>> # Get the last component of the path\n",
    "#     >>> os.path.basename(path)\n",
    "#     'data.csv'\n",
    "    # os.path.splitext：分离文件名和扩展名， 返回两个元素（文件名， 扩展名）\n",
    "    # print('os.path.basename：', os.path.basename(filename))\n",
    "    # print('os.path.splitext：', os.path.splitext(os.path.basename(filename)))\n",
    "    category = os.path.splitext(os.path.basename(filename))[0]\n",
    "    # 将其逐一装到all_categories列表中\n",
    "    all_categories.append(category)\n",
    "    # 然后读取每个文件的内容，形成名字列表\n",
    "    lines = readLines(filename)\n",
    "    # 按照对应的类别，将名字列表写入到category_lines字典中\n",
    "    category_lines[category] = lines\n",
    "\n",
    "print('category_lines==', category_lines)\n",
    "\n",
    "# 查看类别总数\n",
    "n_categories = len(all_categories)\n",
    "print(\"n_categories:\", n_categories)\n",
    "\n",
    "# 随便查看其中的一些内容\n",
    "print(category_lines['Italian'][:5])"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "category_lines== {}\n",
      "n_categories: 0\n"
     ]
    },
    {
     "ename": "KeyError",
     "evalue": "'Italian'",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mKeyError\u001B[0m                                  Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[9], line 37\u001B[0m\n\u001B[0;32m     34\u001B[0m \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mn_categories:\u001B[39m\u001B[38;5;124m\"\u001B[39m, n_categories)\n\u001B[0;32m     36\u001B[0m \u001B[38;5;66;03m# 随便查看其中的一些内容\u001B[39;00m\n\u001B[1;32m---> 37\u001B[0m \u001B[38;5;28mprint\u001B[39m(category_lines[\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mItalian\u001B[39m\u001B[38;5;124m'\u001B[39m][:\u001B[38;5;241m5\u001B[39m])\n",
      "\u001B[1;31mKeyError\u001B[0m: 'Italian'"
     ]
    }
   ],
   "execution_count": 9
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 将人名转化为对应onehot张量表示"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:05.901019Z",
     "start_time": "2025-09-03T15:34:05.897626Z"
    }
   },
   "source": [
    "def lineToTensor(line):\n",
    "    \"\"\"将人名转化为对应onehot张量表示, 参数line是输入的人名\"\"\"\n",
    "    # 首先初始化一个0张量, 它的形状(len(line), 1, n_letters) \n",
    "    # 代表人名中的每个字母用一个1 x n_letters的张量表示.\n",
    "    tensor = torch.zeros(len(line), 1, n_letters)\n",
    "    # 遍历这个人名中的每个字符索引和字符\n",
    "    for li, letter in enumerate(line):\n",
    "        # 使用字符串方法find找到每个字符在all_letters中的索引\n",
    "        # 它也是我们生成onehot张量中1的索引位置\n",
    "        tensor[li][0][all_letters.find(letter)] = 1\n",
    "    # 返回结果\n",
    "    return tensor"
   ],
   "outputs": [],
   "execution_count": 10
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:09.893319Z",
     "start_time": "2025-09-03T15:34:09.877239Z"
    }
   },
   "source": [
     "# Sanity check: encode a short name and inspect the one-hot tensor.\n",
     "line = \"Bai\"\n",
     "line_tensor = lineToTensor(line)\n",
     "print(\"line_tensot:\", line_tensor)"
   ],
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'torch' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mNameError\u001B[0m                                 Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[11], line 3\u001B[0m\n\u001B[0;32m      1\u001B[0m \u001B[38;5;66;03m# 调用验证\u001B[39;00m\n\u001B[0;32m      2\u001B[0m line \u001B[38;5;241m=\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mBai\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m----> 3\u001B[0m line_tensor \u001B[38;5;241m=\u001B[39m lineToTensor(line)\n\u001B[0;32m      4\u001B[0m \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mline_tensot:\u001B[39m\u001B[38;5;124m\"\u001B[39m, line_tensor)\n",
      "Cell \u001B[1;32mIn[10], line 5\u001B[0m, in \u001B[0;36mlineToTensor\u001B[1;34m(line)\u001B[0m\n\u001B[0;32m      2\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"将人名转化为对应onehot张量表示, 参数line是输入的人名\"\"\"\u001B[39;00m\n\u001B[0;32m      3\u001B[0m \u001B[38;5;66;03m# 首先初始化一个0张量, 它的形状(len(line), 1, n_letters) \u001B[39;00m\n\u001B[0;32m      4\u001B[0m \u001B[38;5;66;03m# 代表人名中的每个字母用一个1 x n_letters的张量表示.\u001B[39;00m\n\u001B[1;32m----> 5\u001B[0m tensor \u001B[38;5;241m=\u001B[39m torch\u001B[38;5;241m.\u001B[39mzeros(\u001B[38;5;28mlen\u001B[39m(line), \u001B[38;5;241m1\u001B[39m, n_letters)\n\u001B[0;32m      6\u001B[0m \u001B[38;5;66;03m# 遍历这个人名中的每个字符索引和字符\u001B[39;00m\n\u001B[0;32m      7\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m li, letter \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28menumerate\u001B[39m(line):\n\u001B[0;32m      8\u001B[0m     \u001B[38;5;66;03m# 使用字符串方法find找到每个字符在all_letters中的索引\u001B[39;00m\n\u001B[0;32m      9\u001B[0m     \u001B[38;5;66;03m# 它也是我们生成onehot张量中1的索引位置\u001B[39;00m\n",
      "\u001B[1;31mNameError\u001B[0m: name 'torch' is not defined"
     ]
    }
   ],
   "execution_count": 11
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 第三步: 构建RNN模型(包括传统RNN, LSTM以及GRU).\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建传统的RNN模型"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:19.822430Z",
     "start_time": "2025-09-03T15:34:19.805506Z"
    }
   },
   "source": [
     "# Classic recurrent classifier built on the predefined nn.RNN layer.\n",
     "\n",
     "class RNN(nn.Module):\n",
     "    def __init__(self, input_size, hidden_size, output_size, num_layers=1):\n",
     "        \"\"\"input_size: size of the last input dim (n_letters for one-hot names);\n",
     "        hidden_size: size of the last hidden-state dim; output_size: number of\n",
     "        output classes; num_layers: number of stacked RNN layers.\"\"\"\n",
     "        super(RNN, self).__init__()       \n",
     "        # Keep the sizes around for initHidden().\n",
     "        self.hidden_size = hidden_size\n",
     "        self.num_layers = num_layers  \n",
     "\n",
     "        # Predefined nn.RNN; its three args are input_size, hidden_size, num_layers.\n",
     "        self.rnn = nn.RNN(input_size, hidden_size, num_layers)\n",
     "        # Linear layer projecting the RNN output onto the class scores.\n",
     "        self.linear = nn.Linear(hidden_size, output_size)\n",
     "        # LogSoftmax (log on top of softmax) over the last dim for class scores.\n",
     "        self.softmax = nn.LogSoftmax(dim=-1)\n",
     "\n",
     "\n",
     "    def forward(self, input, hidden):\n",
     "        \"\"\"Run one step. input is a 1 x n_letters tensor; hidden is the\n",
     "           num_layers x 1 x hidden_size hidden-state tensor.\"\"\"\n",
     "        # nn.RNN expects a 3-D input, so prepend a sequence dimension of 1.\n",
     "        input = input.unsqueeze(0)\n",
     "        # With num_layers == 1 the full output rr equals the final state hn.\n",
     "        rr, hn = self.rnn(input, hidden)\n",
     "        # Project to class log-probabilities; also return hn for the next step.\n",
     "        return self.softmax(self.linear(rr)), hn\n",
     "\n",
     "\n",
     "    def initHidden(self):\n",
     "        \"\"\"Return a zeroed (num_layers, 1, hidden_size) initial hidden state.\"\"\"\n",
     "        return torch.zeros(self.num_layers, 1, self.hidden_size)  "
   ],
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'nn' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mNameError\u001B[0m                                 Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[12], line 3\u001B[0m\n\u001B[0;32m      1\u001B[0m \u001B[38;5;66;03m# 使用nn.RNN构建完成传统RNN使用类\u001B[39;00m\n\u001B[1;32m----> 3\u001B[0m \u001B[38;5;28;01mclass\u001B[39;00m \u001B[38;5;21;01mRNN\u001B[39;00m(nn\u001B[38;5;241m.\u001B[39mModule):\n\u001B[0;32m      4\u001B[0m     \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m__init__\u001B[39m(\u001B[38;5;28mself\u001B[39m, input_size, hidden_size, output_size, num_layers\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m1\u001B[39m):\n\u001B[0;32m      5\u001B[0m \u001B[38;5;250m        \u001B[39m\u001B[38;5;124;03m\"\"\"初始化函数中有4个参数, 分别代表RNN输入最后一维尺寸, RNN的隐层最后一维尺寸, RNN层数\"\"\"\u001B[39;00m\n",
      "\u001B[1;31mNameError\u001B[0m: name 'nn' is not defined"
     ]
    }
   ],
   "execution_count": 12
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## 构建LSTM模型"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:23.664278Z",
     "start_time": "2025-09-03T15:34:23.645483Z"
    }
   },
   "source": [
     "# LSTM classifier built on the predefined nn.LSTM layer.\n",
     "\n",
     "class LSTM(nn.Module):\n",
     "    def __init__(self, input_size, hidden_size, output_size, num_layers=1):\n",
     "        \"\"\"Same constructor parameters as the RNN class.\"\"\"\n",
     "        super(LSTM, self).__init__()\n",
     "        # Keep the sizes around for initHiddenAndC().\n",
     "        self.hidden_size = hidden_size\n",
     "        self.num_layers = num_layers\n",
     "\n",
     "        # Predefined nn.LSTM layer.\n",
     "        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)\n",
     "        # Linear layer projecting the LSTM output onto the class scores.\n",
     "        self.linear = nn.Linear(hidden_size, output_size)\n",
     "        # LogSoftmax over the last dimension for class log-probabilities.\n",
     "        self.softmax = nn.LogSoftmax(dim=-1)\n",
     "\n",
     "\n",
     "    def forward(self, input, hidden, c):\n",
     "        \"\"\"Like RNN.forward but with one extra argument c, the LSTM cell state.\"\"\"\n",
     "        # nn.LSTM expects a 3-D input, so prepend a sequence dimension of 1.\n",
     "        input = input.unsqueeze(0)\n",
     "        # The LSTM takes (and returns) the (hidden, cell) state pair.\n",
     "        rr, (hn, c) = self.lstm(input, (hidden, c))\n",
     "        # Return log-probabilities plus both states for the next step.\n",
     "        return self.softmax(self.linear(rr)), hn, c\n",
     "\n",
     "    def initHiddenAndC(self):  \n",
     "        \"\"\"Return zeroed hidden and cell states; both share the same shape.\"\"\"\n",
     "        c = hidden = torch.zeros(self.num_layers, 1, self.hidden_size)\n",
     "        return hidden, c "
   ],
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'nn' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mNameError\u001B[0m                                 Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[13], line 3\u001B[0m\n\u001B[0;32m      1\u001B[0m \u001B[38;5;66;03m# 使用nn.LSTM构建完成LSTM使用类\u001B[39;00m\n\u001B[1;32m----> 3\u001B[0m \u001B[38;5;28;01mclass\u001B[39;00m \u001B[38;5;21;01mLSTM\u001B[39;00m(nn\u001B[38;5;241m.\u001B[39mModule):\n\u001B[0;32m      4\u001B[0m     \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m__init__\u001B[39m(\u001B[38;5;28mself\u001B[39m, input_size, hidden_size, output_size, num_layers\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m1\u001B[39m):\n\u001B[0;32m      5\u001B[0m \u001B[38;5;250m        \u001B[39m\u001B[38;5;124;03m\"\"\"初始化函数的参数与传统RNN相同\"\"\"\u001B[39;00m\n",
      "\u001B[1;31mNameError\u001B[0m: name 'nn' is not defined"
     ]
    }
   ],
   "execution_count": 13
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建GRU模型"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:25.757909Z",
     "start_time": "2025-09-03T15:34:25.742339Z"
    }
   },
   "source": [
    "# 使用nn.GRU构建完成传统RNN使用类\n",
    "\n",
    "# GRU与传统RNN的外部形式相同, 都是只传递隐层张量, 因此只需要更改预定义层的名字\n",
    "\n",
    "\n",
    "class GRU(nn.Module):\n",
    "    def __init__(self, input_size, hidden_size, output_size, num_layers=1):\n",
    "        super(GRU, self).__init__()\n",
    "        self.hidden_size = hidden_size\n",
    "        self.num_layers = num_layers\n",
    "\n",
    "        # 实例化预定义的nn.GRU, 它的三个参数分别是input_size, hidden_size, num_layers\n",
    "        self.gru = nn.GRU(input_size, hidden_size, num_layers)\n",
    "        self.linear = nn.Linear(hidden_size, output_size)\n",
    "        self.softmax = nn.LogSoftmax(dim=-1)\n",
    "\n",
    "    def forward(self, input, hidden):\n",
    "        input = input.unsqueeze(0)\n",
    "        rr, hn = self.gru(input, hidden)\n",
    "        return self.softmax(self.linear(rr)), hn\n",
    "\n",
    "    def initHidden(self):\n",
    "        return torch.zeros(self.num_layers, 1, self.hidden_size)\n"
   ],
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'nn' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mNameError\u001B[0m                                 Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[14], line 6\u001B[0m\n\u001B[0;32m      1\u001B[0m \u001B[38;5;66;03m# 使用nn.GRU构建完成传统RNN使用类\u001B[39;00m\n\u001B[0;32m      2\u001B[0m \n\u001B[0;32m      3\u001B[0m \u001B[38;5;66;03m# GRU与传统RNN的外部形式相同, 都是只传递隐层张量, 因此只需要更改预定义层的名字\u001B[39;00m\n\u001B[1;32m----> 6\u001B[0m \u001B[38;5;28;01mclass\u001B[39;00m \u001B[38;5;21;01mGRU\u001B[39;00m(nn\u001B[38;5;241m.\u001B[39mModule):\n\u001B[0;32m      7\u001B[0m     \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m__init__\u001B[39m(\u001B[38;5;28mself\u001B[39m, input_size, hidden_size, output_size, num_layers\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m1\u001B[39m):\n\u001B[0;32m      8\u001B[0m         \u001B[38;5;28msuper\u001B[39m(GRU, \u001B[38;5;28mself\u001B[39m)\u001B[38;5;241m.\u001B[39m\u001B[38;5;21m__init__\u001B[39m()\n",
      "\u001B[1;31mNameError\u001B[0m: name 'nn' is not defined"
     ]
    }
   ],
   "execution_count": 14
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:26.625515Z",
     "start_time": "2025-09-03T15:34:26.621482Z"
    }
   },
   "source": [
     "# Wire the model hyper-parameters to the data statistics.\n",
     "# One-hot encoding makes the input's last dim exactly n_letters.\n",
     "input_size = n_letters\n",
     "\n",
     "# Size of the hidden state's last dimension.\n",
     "n_hidden = 128\n",
     "\n",
     "# The output size equals the number of language categories.\n",
     "output_size = n_categories\n",
     "\n",
     "# num_layers keeps its default value of 1."
   ],
   "outputs": [],
   "execution_count": 15
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:27.333299Z",
     "start_time": "2025-09-03T15:34:27.318911Z"
    }
   },
   "source": [
     "# Feed a single letter 'B' as the first input, encoded via lineToTensor.\n",
     "# lineToTensor returns a 3-D tensor but the model classes expect 2-D input,\n",
     "# so squeeze(0) drops the leading dimension.\n",
     "input = lineToTensor('B').squeeze(0)\n",
     "# Zero-initialize the 3-D hidden state; reuse the same tensor as the\n",
     "# initial LSTM cell state.\n",
     "hidden = c = torch.zeros(1, 1, n_hidden)"
   ],
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'torch' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mNameError\u001B[0m                                 Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[16], line 4\u001B[0m\n\u001B[0;32m      1\u001B[0m \u001B[38;5;66;03m# 假如我们以一个字母B作为RNN的首次输入, 它通过lineToTensor转为张量\u001B[39;00m\n\u001B[0;32m      2\u001B[0m \u001B[38;5;66;03m# 因为我们的lineToTensor输出是三维张量, 而RNN类需要的二维张量\u001B[39;00m\n\u001B[0;32m      3\u001B[0m \u001B[38;5;66;03m# 因此需要使用squeeze(0)降低一个维度\u001B[39;00m\n\u001B[1;32m----> 4\u001B[0m \u001B[38;5;28minput\u001B[39m \u001B[38;5;241m=\u001B[39m lineToTensor(\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mB\u001B[39m\u001B[38;5;124m'\u001B[39m)\u001B[38;5;241m.\u001B[39msqueeze(\u001B[38;5;241m0\u001B[39m)\n\u001B[0;32m      5\u001B[0m \u001B[38;5;66;03m# input = lineToTensor(\"B\")\u001B[39;00m\n\u001B[0;32m      6\u001B[0m \u001B[38;5;66;03m# 初始化一个三维的隐层0张量, 也是初始的细胞状态张量\u001B[39;00m\n\u001B[0;32m      7\u001B[0m hidden \u001B[38;5;241m=\u001B[39m c \u001B[38;5;241m=\u001B[39m torch\u001B[38;5;241m.\u001B[39mzeros(\u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, n_hidden)\n",
      "Cell \u001B[1;32mIn[10], line 5\u001B[0m, in \u001B[0;36mlineToTensor\u001B[1;34m(line)\u001B[0m\n\u001B[0;32m      2\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"将人名转化为对应onehot张量表示, 参数line是输入的人名\"\"\"\u001B[39;00m\n\u001B[0;32m      3\u001B[0m \u001B[38;5;66;03m# 首先初始化一个0张量, 它的形状(len(line), 1, n_letters) \u001B[39;00m\n\u001B[0;32m      4\u001B[0m \u001B[38;5;66;03m# 代表人名中的每个字母用一个1 x n_letters的张量表示.\u001B[39;00m\n\u001B[1;32m----> 5\u001B[0m tensor \u001B[38;5;241m=\u001B[39m torch\u001B[38;5;241m.\u001B[39mzeros(\u001B[38;5;28mlen\u001B[39m(line), \u001B[38;5;241m1\u001B[39m, n_letters)\n\u001B[0;32m      6\u001B[0m \u001B[38;5;66;03m# 遍历这个人名中的每个字符索引和字符\u001B[39;00m\n\u001B[0;32m      7\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m li, letter \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28menumerate\u001B[39m(line):\n\u001B[0;32m      8\u001B[0m     \u001B[38;5;66;03m# 使用字符串方法find找到每个字符在all_letters中的索引\u001B[39;00m\n\u001B[0;32m      9\u001B[0m     \u001B[38;5;66;03m# 它也是我们生成onehot张量中1的索引位置\u001B[39;00m\n",
      "\u001B[1;31mNameError\u001B[0m: name 'torch' is not defined"
     ]
    }
   ],
   "execution_count": 16
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:28.553063Z",
     "start_time": "2025-09-03T15:34:28.540859Z"
    }
   },
   "source": [
     "# Instantiate the three models with identical sizes and run one step each.\n",
     "rnn = RNN(n_letters, n_hidden, n_categories)\n",
     "lstm = LSTM(n_letters, n_hidden, n_categories)\n",
     "gru = GRU(n_letters, n_hidden, n_categories)\n",
     "\n",
     "rnn_output, next_hidden = rnn(input, hidden)\n",
     "print(\"rnn:\", rnn_output)\n",
     "# rnn_output has shape (1, 1, n_categories), e.g. torch.Size([1, 1, 18]).\n",
     "# The LSTM additionally consumes and returns the cell state c.\n",
     "lstm_output, next_hidden, c = lstm(input, hidden, c)\n",
     "print(\"lstm:\", lstm_output)\n",
     "gru_output, next_hidden = gru(input, hidden)\n",
     "print(\"gru:\", gru_output)"
   ],
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'RNN' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mNameError\u001B[0m                                 Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[17], line 1\u001B[0m\n\u001B[1;32m----> 1\u001B[0m rnn \u001B[38;5;241m=\u001B[39m RNN(n_letters, n_hidden, n_categories)\n\u001B[0;32m      2\u001B[0m lstm \u001B[38;5;241m=\u001B[39m LSTM(n_letters, n_hidden, n_categories)\n\u001B[0;32m      3\u001B[0m gru \u001B[38;5;241m=\u001B[39m GRU(n_letters, n_hidden, n_categories)\n",
      "\u001B[1;31mNameError\u001B[0m: name 'RNN' is not defined"
     ]
    }
   ],
   "execution_count": 17
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 第四步: 构建训练函数并进行训练."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 从输出结果中获得指定类别函数"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:31.016836Z",
     "start_time": "2025-09-03T15:34:31.013255Z"
    }
   },
   "source": [
    "def categoryFromOutput(output):\n",
    "    \"\"\"从输出结果中获得指定类别, 参数为输出张量output\"\"\"\n",
    "    # 从输出张量中返回最大的值和索引对象, 我们这里主要需要这个索引\n",
    "    # 注意： pytorch.topk()用于返回Tensor中的前k个元素以及元素对应的索引值\n",
    "    top_n, top_i = output.topk(1)\n",
    "    # top_i对象中取出索引的值\n",
    "    category_i = top_i[0].item()\n",
    "    # 根据索引值获得对应语言类别, 返回语言类别和索引值\n",
    "    return all_categories[category_i], category_i"
   ],
   "outputs": [],
   "execution_count": 18
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:32.913622Z",
     "start_time": "2025-09-03T15:34:32.900660Z"
    }
   },
   "source": [
    "# 调用验证\n",
    "output = gru_output\n",
    "category, category_i = categoryFromOutput(output)\n",
    "print(\"category:\", category) \n",
    "print(\"category_i:\", category_i)"
   ],
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'gru_output' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mNameError\u001B[0m                                 Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[19], line 2\u001B[0m\n\u001B[0;32m      1\u001B[0m \u001B[38;5;66;03m# 调用验证\u001B[39;00m\n\u001B[1;32m----> 2\u001B[0m output \u001B[38;5;241m=\u001B[39m gru_output\n\u001B[0;32m      3\u001B[0m category, category_i \u001B[38;5;241m=\u001B[39m categoryFromOutput(output)\n\u001B[0;32m      4\u001B[0m \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mcategory:\u001B[39m\u001B[38;5;124m\"\u001B[39m, category) \n",
      "\u001B[1;31mNameError\u001B[0m: name 'gru_output' is not defined"
     ]
    }
   ],
   "execution_count": 19
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 随机生成训练数据"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:35.278898Z",
     "start_time": "2025-09-03T15:34:35.275155Z"
    }
   },
   "source": [
     "def randomTrainingExample():\n",
     "    \"\"\"Draw one random (category, name) training pair plus tensor forms.\n",
     "\n",
     "    Returns (category, line, category_tensor, line_tensor) where\n",
     "    category_tensor holds the category's index as a long tensor and\n",
     "    line_tensor is the one-hot encoding produced by lineToTensor.\n",
     "    \"\"\"\n",
     "    # Pick a random language category...\n",
     "    category = random.choice(all_categories)\n",
     "    # ...then a random name from that category's list.\n",
     "    line = random.choice(category_lines[category])\n",
     "    # Class label: the category's index wrapped in a long tensor.\n",
     "    category_tensor = torch.tensor([all_categories.index(category)], dtype=torch.long)\n",
     "    # One-hot tensor representation of the name.\n",
     "    line_tensor = lineToTensor(line)\n",
     "    return category, line, category_tensor, line_tensor\n"
   ],
   "outputs": [],
   "execution_count": 20
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:36.409753Z",
     "start_time": "2025-09-03T15:34:36.349353Z"
    }
   },
   "source": [
     "# Sanity check: draw ten random training examples and inspect them.\n",
     "for i in range(10):\n",
     "    category, line, category_tensor, line_tensor = randomTrainingExample()\n",
     "    print('category =', category, '/ line =', line, '/ category_tensor =', category_tensor)"
   ],
   "outputs": [
    {
     "ename": "IndexError",
     "evalue": "Cannot choose from an empty sequence",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mIndexError\u001B[0m                                Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[21], line 4\u001B[0m\n\u001B[0;32m      1\u001B[0m \u001B[38;5;66;03m#  调用验证\u001B[39;00m\n\u001B[0;32m      2\u001B[0m \u001B[38;5;66;03m# 我们随机取出十个进行结果查看\u001B[39;00m\n\u001B[0;32m      3\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m i \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mrange\u001B[39m(\u001B[38;5;241m10\u001B[39m):\n\u001B[1;32m----> 4\u001B[0m     category, line, category_tensor, line_tensor \u001B[38;5;241m=\u001B[39m randomTrainingExample()\n\u001B[0;32m      5\u001B[0m     \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mcategory =\u001B[39m\u001B[38;5;124m'\u001B[39m, category, \u001B[38;5;124m'\u001B[39m\u001B[38;5;124m/ line =\u001B[39m\u001B[38;5;124m'\u001B[39m, line, \u001B[38;5;124m'\u001B[39m\u001B[38;5;124m/ category_tensor =\u001B[39m\u001B[38;5;124m'\u001B[39m, category_tensor)\n",
      "Cell \u001B[1;32mIn[20], line 4\u001B[0m, in \u001B[0;36mrandomTrainingExample\u001B[1;34m()\u001B[0m\n\u001B[0;32m      2\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"该函数用于随机产生训练数据\"\"\"\u001B[39;00m\n\u001B[0;32m      3\u001B[0m \u001B[38;5;66;03m# 首先使用random的choice方法从all_categories随机选择一个类别\u001B[39;00m\n\u001B[1;32m----> 4\u001B[0m category \u001B[38;5;241m=\u001B[39m random\u001B[38;5;241m.\u001B[39mchoice(all_categories)\n\u001B[0;32m      5\u001B[0m \u001B[38;5;66;03m# 然后在通过category_lines字典取category类别对应的名字列表\u001B[39;00m\n\u001B[0;32m      6\u001B[0m \u001B[38;5;66;03m# 之后再从列表中随机取一个名字\u001B[39;00m\n\u001B[0;32m      7\u001B[0m line \u001B[38;5;241m=\u001B[39m random\u001B[38;5;241m.\u001B[39mchoice(category_lines[category])\n",
      "File \u001B[1;32mD:\\Program Files\\Anaconda\\Lib\\random.py:347\u001B[0m, in \u001B[0;36mRandom.choice\u001B[1;34m(self, seq)\u001B[0m\n\u001B[0;32m    344\u001B[0m \u001B[38;5;66;03m# As an accommodation for NumPy, we don't use \"if not seq\"\u001B[39;00m\n\u001B[0;32m    345\u001B[0m \u001B[38;5;66;03m# because bool(numpy.array()) raises a ValueError.\u001B[39;00m\n\u001B[0;32m    346\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28mlen\u001B[39m(seq):\n\u001B[1;32m--> 347\u001B[0m     \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mIndexError\u001B[39;00m(\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mCannot choose from an empty sequence\u001B[39m\u001B[38;5;124m'\u001B[39m)\n\u001B[0;32m    348\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m seq[\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_randbelow(\u001B[38;5;28mlen\u001B[39m(seq))]\n",
      "\u001B[1;31mIndexError\u001B[0m: Cannot choose from an empty sequence"
     ]
    }
   ],
   "execution_count": 21
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建传统RNN训练函数"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:37.926350Z",
     "start_time": "2025-09-03T15:34:37.912885Z"
    }
   },
   "source": [
    "# Use nn.NLLLoss as the criterion: the RNN's last layer is nn.LogSoftmax,\n",
    "# and NLLLoss expects log-probabilities, so the two fit together.\n",
    "criterion = nn.NLLLoss()\n",
    "\n",
    "# Learning rate for the manual SGD update below.\n",
    "learning_rate = 0.005\n",
    "\n",
    "def trainRNN(category_tensor, line_tensor):\n",
    "    \"\"\"Run one training step of the plain RNN.\n",
    "\n",
    "    category_tensor: tensor holding the target category index (the label).\n",
    "    line_tensor: one-hot tensor sequence representing the name.\n",
    "    Returns (output, loss_value).\n",
    "    \"\"\"\n",
    "    # Initialize the hidden state via the rnn instance.\n",
    "    hidden = rnn.initHidden()\n",
    "\n",
    "    # Zero out gradients accumulated from previous steps.\n",
    "    rnn.zero_grad()\n",
    "\n",
    "    # Feed the name's characters into the rnn one at a time.\n",
    "    for i in range(line_tensor.size()[0]):\n",
    "        output, hidden = rnn(line_tensor[i], hidden)\n",
    "\n",
    "    # The rnn output is 3-D; drop the first dimension so the criterion\n",
    "    # can compare it against category_tensor.\n",
    "    loss = criterion(output.squeeze(0), category_tensor)\n",
    "\n",
    "    # Backpropagate the loss.\n",
    "    loss.backward()\n",
    "    # Manual SGD step: p <- p - learning_rate * grad.\n",
    "    # The old two-positional form add_(-lr, grad) was deprecated and removed\n",
    "    # in modern PyTorch; the alpha keyword is the supported spelling.\n",
    "    for p in rnn.parameters():\n",
    "        p.data.add_(p.grad.data, alpha=-learning_rate)\n",
    "    # Return the final output and the scalar loss.\n",
    "    return output, loss.item()"
   ],
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'nn' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mNameError\u001B[0m                                 Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[22], line 3\u001B[0m\n\u001B[0;32m      1\u001B[0m \u001B[38;5;66;03m# 定义损失函数为nn.NLLLoss，因为RNN的最后一层是nn.LogSoftmax, 两者的内部计算逻辑正好能够吻合.  \u001B[39;00m\n\u001B[0;32m      2\u001B[0m \u001B[38;5;66;03m# 扩展资料：nn.NLLLoss(): https://blog.csdn.net/jeremy_lf/article/details/102725285\u001B[39;00m\n\u001B[1;32m----> 3\u001B[0m criterion \u001B[38;5;241m=\u001B[39m nn\u001B[38;5;241m.\u001B[39mNLLLoss()\n\u001B[0;32m      5\u001B[0m \u001B[38;5;66;03m# 设置学习率为0.005\u001B[39;00m\n\u001B[0;32m      6\u001B[0m learning_rate \u001B[38;5;241m=\u001B[39m \u001B[38;5;241m0.005\u001B[39m \n",
      "\u001B[1;31mNameError\u001B[0m: name 'nn' is not defined"
     ]
    }
   ],
   "execution_count": 22
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建LSTM训练函数"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:39.475968Z",
     "start_time": "2025-09-03T15:34:39.471801Z"
    }
   },
   "source": [
    "# Compared with the plain RNN, the LSTM carries an extra cell state c.\n",
    "\n",
    "def trainLSTM(category_tensor, line_tensor):\n",
    "    \"\"\"Run one LSTM training step; returns (output, loss_value).\"\"\"\n",
    "    # Initialize both the hidden state and the cell state.\n",
    "    hidden, c = lstm.initHiddenAndC()\n",
    "    lstm.zero_grad()\n",
    "    for i in range(line_tensor.size()[0]):\n",
    "        # Each step returns output, hidden state and cell state c.\n",
    "        output, hidden, c = lstm(line_tensor[i], hidden, c)\n",
    "    loss = criterion(output.squeeze(0), category_tensor)\n",
    "    loss.backward()\n",
    "\n",
    "    # Manual SGD step (alpha keyword replaces the removed two-positional form).\n",
    "    for p in lstm.parameters():\n",
    "        p.data.add_(p.grad.data, alpha=-learning_rate)\n",
    "    return output, loss.item()"
   ],
   "outputs": [],
   "execution_count": 23
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建GRU训练函数"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:40.813658Z",
     "start_time": "2025-09-03T15:34:40.805941Z"
    }
   },
   "source": [
    "# Identical to the plain-RNN routine, but driving the gru model.\n",
    "\n",
    "def trainGRU(category_tensor, line_tensor):\n",
    "    \"\"\"Run one GRU training step; returns (output, loss_value).\"\"\"\n",
    "    hidden = gru.initHidden()\n",
    "    gru.zero_grad()\n",
    "    for i in range(line_tensor.size()[0]):\n",
    "        output, hidden = gru(line_tensor[i], hidden)\n",
    "    loss = criterion(output.squeeze(0), category_tensor)\n",
    "    loss.backward()\n",
    "\n",
    "    # Manual SGD step (alpha keyword replaces the removed two-positional form).\n",
    "    for p in gru.parameters():\n",
    "        p.data.add_(p.grad.data, alpha=-learning_rate)\n",
    "    return output, loss.item()"
   ],
   "outputs": [],
   "execution_count": 24
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建时间计算函数"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:41.727608Z",
     "start_time": "2025-09-03T15:34:41.724571Z"
    }
   },
   "source": [
    "def timeSince(since):\n",
    "    \"获得每次打印的训练耗时, since是训练开始时间\"\n",
    "    # 获得当前时间\n",
    "    now = time.time()\n",
    "    # 获得时间差，就是训练耗时\n",
    "    s = now - since\n",
    "    # 将秒转化为分钟, 并取整\n",
    "    m = math.floor(s / 60)\n",
    "    # 计算剩下不够凑成1分钟的秒数\n",
    "    s -= m * 60\n",
    "    # 返回指定格式的耗时\n",
    "    return '%dm %ds' % (m, s)"
   ],
   "outputs": [],
   "execution_count": 25
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:42.195203Z",
     "start_time": "2025-09-03T15:34:42.189213Z"
    }
   },
   "source": [
    "# Sanity check: pretend training started 10 minutes ago\n",
    "# and verify the formatted elapsed time.\n",
    "since = time.time() - 10*60\n",
    "period = timeSince(since)\n",
    "print(period)"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10m 0s\n"
     ]
    }
   ],
   "execution_count": 26
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建训练过程的日志打印函数"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:44.453236Z",
     "start_time": "2025-09-03T15:34:44.448276Z"
    }
   },
   "source": [
    "# Total number of training iterations.\n",
    "n_iters = 1000\n",
    "# Print a progress line every `print_every` iterations.\n",
    "print_every = 50\n",
    "# Record a loss-curve point every `plot_every` iterations.\n",
    "plot_every = 10\n",
    "\n",
    "def train(train_type_fn):\n",
    "    \"\"\"Training loop with periodic logging.\n",
    "\n",
    "    train_type_fn: one of the per-model step functions, e.g. trainRNN.\n",
    "    Returns (all_losses, elapsed_seconds) where all_losses holds the\n",
    "    average loss of each plot interval.\n",
    "    \"\"\"\n",
    "    # Average loss recorded once per plot interval.\n",
    "    all_losses = []\n",
    "    # Timestamp marking the start of training.\n",
    "    start = time.time()\n",
    "    # Loss accumulated within the current plot interval.\n",
    "    current_loss = 0\n",
    "    # Named `iteration` (not `iter`) to avoid shadowing the builtin.\n",
    "    for iteration in range(1, n_iters + 1):\n",
    "        # Draw one random (category, name) training example.\n",
    "        category, line, category_tensor, line_tensor = randomTrainingExample()\n",
    "        # Run one training step with the chosen model function.\n",
    "        output, loss = train_type_fn(category_tensor, line_tensor)\n",
    "        # Accumulate the loss for the current plot interval.\n",
    "        current_loss += loss\n",
    "        if iteration % print_every == 0:\n",
    "            # Map the network output back to a category name and index.\n",
    "            guess, guess_i = categoryFromOutput(output)\n",
    "            # Mark the guess correct (✓) or wrong (✗ plus the true label).\n",
    "            correct = '✓' if guess == category else '✗ (%s)' % category\n",
    "            # iteration, percent done, elapsed time, loss, name, guess, verdict\n",
    "            print('%d %d%% (%s) %.4f %s / %s %s' % (iteration, iteration / n_iters * 100, timeSince(start), loss, line, guess, correct))\n",
    "\n",
    "        if iteration % plot_every == 0:\n",
    "            # Store the interval's average loss and reset the accumulator.\n",
    "            all_losses.append(current_loss / plot_every)\n",
    "            current_loss = 0\n",
    "    # Return the recorded losses and total elapsed seconds.\n",
    "    return all_losses, int(time.time() - start)"
   ],
   "outputs": [],
   "execution_count": 27
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 开始训练传统RNN, LSTM, GRU模型并制作对比图"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:57.519762Z",
     "start_time": "2025-09-03T15:34:57.506150Z"
    }
   },
   "source": [
    "# Call train() for the RNN, LSTM and GRU models in turn, collecting\n",
    "# each model's loss history and training time for the comparison plots.\n",
    "print(\"RNN日志输出:\")\n",
    "all_losses1, period1 = train(trainRNN)\n",
    "print(\"LSTM日志输出:\")\n",
    "all_losses2, period2 = train(trainLSTM)\n",
    "print(\"GRU日志输出:\")\n",
    "all_losses3, period3 = train(trainGRU)"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "RNN日志输出:\n"
     ]
    },
    {
     "ename": "NameError",
     "evalue": "name 'trainRNN' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mNameError\u001B[0m                                 Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[28], line 4\u001B[0m\n\u001B[0;32m      1\u001B[0m \u001B[38;5;66;03m# 调用train函数, 分别进行RNN, LSTM, GRU模型的训练\u001B[39;00m\n\u001B[0;32m      2\u001B[0m \u001B[38;5;66;03m# 并返回各自的全部损失, 以及训练耗时用于制图\u001B[39;00m\n\u001B[0;32m      3\u001B[0m \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mRNN日志输出:\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[1;32m----> 4\u001B[0m all_losses1, period1 \u001B[38;5;241m=\u001B[39m train(trainRNN)\n\u001B[0;32m      5\u001B[0m \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mLSTM日志输出:\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[0;32m      6\u001B[0m all_losses2, period2 \u001B[38;5;241m=\u001B[39m train(trainLSTM)\n",
      "\u001B[1;31mNameError\u001B[0m: name 'trainRNN' is not defined"
     ]
    }
   ],
   "execution_count": 28
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T15:34:58.885109Z",
     "start_time": "2025-09-03T15:34:58.869587Z"
    }
   },
   "source": [
    "# Compare the three models: loss curves plus a training-time bar chart.\n",
    "# Figure 0: loss curves.\n",
    "plt.figure(0, figsize=(20, 8), dpi=100)\n",
    "plt.plot(all_losses1, label=\"RNN\")\n",
    "plt.plot(all_losses2, color=\"red\", label=\"LSTM\")\n",
    "plt.plot(all_losses3, color=\"orange\", label=\"GRU\")\n",
    "plt.legend(loc='upper left')\n",
    "\n",
    "# Figure 1: training duration of each model as a bar chart.\n",
    "plt.figure(1, figsize=(20, 8), dpi=100)\n",
    "model_names = [\"RNN\", \"LSTM\", \"GRU\"]\n",
    "durations = [period1, period2, period3]\n",
    "plt.bar(range(len(model_names)), durations, tick_label=model_names)\n",
    "plt.show()"
   ],
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'plt' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mNameError\u001B[0m                                 Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[29], line 3\u001B[0m\n\u001B[0;32m      1\u001B[0m \u001B[38;5;66;03m# 绘制损失对比曲线, 训练耗时对比柱张图\u001B[39;00m\n\u001B[0;32m      2\u001B[0m \u001B[38;5;66;03m# 创建画布0\u001B[39;00m\n\u001B[1;32m----> 3\u001B[0m plt\u001B[38;5;241m.\u001B[39mfigure(\u001B[38;5;241m0\u001B[39m, figsize\u001B[38;5;241m=\u001B[39m(\u001B[38;5;241m20\u001B[39m,\u001B[38;5;241m8\u001B[39m), dpi\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m100\u001B[39m)\n\u001B[0;32m      4\u001B[0m \u001B[38;5;66;03m# 绘制损失对比曲线\u001B[39;00m\n\u001B[0;32m      5\u001B[0m plt\u001B[38;5;241m.\u001B[39mplot(all_losses1, label\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mRNN\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n",
      "\u001B[1;31mNameError\u001B[0m: name 'plt' is not defined"
     ]
    }
   ],
   "execution_count": 29
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 第五步: 构建评估函数并进行预测."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建传统RNN评估函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluateRNN(line_tensor):\n",
    "    \"\"\"Evaluate the plain RNN on one name tensor.\n",
    "\n",
    "    Same forward pass as training, but with no loss or backprop.\n",
    "    line_tensor: one-hot tensor sequence representing the name.\n",
    "    Returns the final output squeezed along dim 0.\n",
    "    \"\"\"\n",
    "    # Initialize the hidden state.\n",
    "    hidden = rnn.initHidden()\n",
    "    # Feed the characters through the rnn one at a time.\n",
    "    # (Leftover debug prints of tensor shapes/contents were removed.)\n",
    "    for i in range(line_tensor.size()[0]):\n",
    "        output, hidden = rnn(line_tensor[i], hidden)\n",
    "    # Drop the leading dimension of the [1, 1, n_categories] output.\n",
    "    return output.squeeze(0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建LSTM评估函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluateLSTM(line_tensor):\n",
    "    \"\"\"Evaluate the LSTM on one name tensor; returns output.squeeze(0).\"\"\"\n",
    "    # Start from fresh hidden and cell states.\n",
    "    hidden, c = lstm.initHiddenAndC()\n",
    "    # Push each character tensor through the LSTM in sequence.\n",
    "    for step in range(line_tensor.size(0)):\n",
    "        output, hidden, c = lstm(line_tensor[step], hidden, c)\n",
    "    return output.squeeze(0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建GRU评估函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluateGRU(line_tensor):\n",
    "    \"\"\"Evaluate the GRU on one name tensor; returns output.squeeze(0).\"\"\"\n",
    "    hidden = gru.initHidden()\n",
    "    # Push each character tensor through the GRU in sequence.\n",
    "    for step in range(line_tensor.size(0)):\n",
    "        output, hidden = gru(line_tensor[step], hidden)\n",
    "    return output.squeeze(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check input: build the one-hot tensor for a sample name.\n",
    "line = \"Bai\"\n",
    "line_tensor = lineToTensor(line)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "line_tensor.size== torch.Size([3, 1, 57])\n",
      "line_tensor.size[]== 3\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "outputsize== torch.Size([1, 1, 18])\n",
      "rnn_output: tensor([[-2.8795, -2.8467, -2.8752, -2.8899, -2.9733, -2.9124, -2.9040, -2.9107,\n",
      "         -2.8473, -2.8183, -2.9301, -2.8845, -2.7910, -2.9851, -2.7514, -2.9404,\n",
      "         -2.9475, -2.9746]], grad_fn=<SqueezeBackward1>)\n",
      "gru_output: tensor([[-3.0079, -2.8799, -2.8678, -2.8241, -2.8551, -2.9573, -2.8252, -2.8306,\n",
      "         -2.8861, -2.9102, -3.0387, -2.8362, -2.9103, -2.9285, -2.8446, -2.9814,\n",
      "         -2.8498, -2.8300]], grad_fn=<SqueezeBackward1>)\n",
      "gru_output: tensor([[-2.8958, -2.8557, -2.8501, -2.8590, -2.8238, -2.9248, -3.0951, -2.8608,\n",
      "         -2.8034, -2.7923, -2.8793, -2.9487, -2.8155, -2.8471, -2.9650, -2.8823,\n",
      "         -2.9750, -3.0046]], grad_fn=<SqueezeBackward1>)\n"
     ]
    }
   ],
   "source": [
    "# Run all three evaluators on the same name tensor and show the outputs.\n",
    "rnn_output = evaluateRNN(line_tensor)\n",
    "lstm_output = evaluateLSTM(line_tensor)\n",
    "gru_output = evaluateGRU(line_tensor)\n",
    "print(\"rnn_output:\", rnn_output)\n",
    "# Fixed label: this line previously printed \"gru_output:\" for the LSTM result.\n",
    "print(\"lstm_output:\", lstm_output)\n",
    "print(\"gru_output:\", gru_output)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##  构建预测函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "def predict(input_line, evaluate, n_predictions=3):\n",
    "    \"\"\"Print and return the top candidate categories for a name.\n",
    "\n",
    "    input_line: the name to classify.\n",
    "    evaluate: one of the evaluation functions (evaluateRNN/LSTM/GRU).\n",
    "    n_predictions: how many top candidates to report.\n",
    "    Returns a list of [log_prob, category] pairs.\n",
    "    \"\"\"\n",
    "    # Echo the input name.\n",
    "    print('\\n> %s' % input_line)\n",
    "\n",
    "    # Gradients are not needed for inference.\n",
    "    with torch.no_grad():\n",
    "        # Turn the name into a tensor and run the chosen evaluator.\n",
    "        output = evaluate(lineToTensor(input_line))\n",
    "\n",
    "        # Take the n_predictions largest values and their indices.\n",
    "        topv, topi = output.topk(n_predictions, 1, True)\n",
    "        # Collect [value, category] pairs.\n",
    "        predictions = []\n",
    "        for i in range(n_predictions):\n",
    "            # Log-probability of candidate i.\n",
    "            value = topv[0][i].item()\n",
    "            # Map the index back to its category name.\n",
    "            category_index = topi[0][i].item()\n",
    "            print('(%.2f) %s' % (value, all_categories[category_index]))\n",
    "            predictions.append([value, all_categories[category_index]])\n",
    "    # Bug fix: the list was built but never returned to the caller.\n",
    "    return predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "------------------\n",
      "\n",
      "> Dovesky\n",
      "line_tensor.size== torch.Size([7, 1, 57])\n",
      "line_tensor.size[]== 7\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "outputsize== torch.Size([1, 1, 18])\n",
      "(-2.67) Polish\n",
      "(-2.70) Russian\n",
      "(-2.82) Irish\n",
      "\n",
      "> Jackson\n",
      "line_tensor.size== torch.Size([7, 1, 57])\n",
      "line_tensor.size[]== 7\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "outputsize== torch.Size([1, 1, 18])\n",
      "(-2.68) Russian\n",
      "(-2.80) Arabic\n",
      "(-2.84) Spanish\n",
      "\n",
      "> Satoshi\n",
      "line_tensor.size== torch.Size([7, 1, 57])\n",
      "line_tensor.size[]== 7\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "line_tensor[i]=== tensor([[0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "         0., 0., 0.]])\n",
      "outputsize== torch.Size([1, 1, 18])\n",
      "(-2.71) Polish\n",
      "(-2.76) Italian\n",
      "(-2.78) Russian\n",
      "------------------\n",
      "\n",
      "> Dovesky\n",
      "(-2.82) Greek\n",
      "(-2.82) Dutch\n",
      "(-2.82) Russian\n",
      "\n",
      "> Jackson\n",
      "(-2.81) Vietnamese\n",
      "(-2.82) Korean\n",
      "(-2.83) Russian\n",
      "\n",
      "> Satoshi\n",
      "(-2.83) German\n",
      "(-2.83) Greek\n",
      "(-2.83) Dutch\n",
      "------------------\n",
      "\n",
      "> Dovesky\n",
      "(-2.78) Italian\n",
      "(-2.80) Czech\n",
      "(-2.81) Irish\n",
      "\n",
      "> Jackson\n",
      "(-2.77) Italian\n",
      "(-2.80) Irish\n",
      "(-2.81) Czech\n",
      "\n",
      "> Satoshi\n",
      "(-2.79) Irish\n",
      "(-2.80) Italian\n",
      "(-2.81) English\n"
     ]
    }
   ],
   "source": [
    "# Run three sample names through each of the evaluators.\n",
    "sample_names = ['Dovesky', 'Jackson', 'Satoshi']\n",
    "for evaluate_fn in (evaluateRNN, evaluateLSTM, evaluateGRU):\n",
    "    print(\"-\" * 18)\n",
    "    for name in sample_names:\n",
    "        predict(name, evaluate_fn)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": ""
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": ""
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": true
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
