{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "initial_id",
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
     "import tensorflow as tf\n",
     "from tensorflow import keras\n",
     "from tensorflow.keras.datasets import imdb\n",
     "\n",
     "# Load the IMDB movie-review dataset.\n",
     "# num_words=10000 keeps only the 10,000 most frequent words; the full word index\n",
     "# is much larger (~88k entries), and dropped words are replaced by the <UNK> id.\n",
     "# Parameters:\n",
     "# - num_words: number of most frequent words to keep\n",
     "# - skip_top: number of top (most frequent, typically stop) words to skip\n",
     "# - maxlen: maximum sequence length\n",
     "# - index_from: starting offset for word indices, default 3\n",
     "vocab_size=10000\n",
     "(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=vocab_size, index_from=3)\n",
     "\n",
     "print(f\"训练集样本数: {len(x_train)}, 测试集样本数: {len(x_test)}\")\n",
     "print(f\"标签示例: {y_train[:10]}\")  # 0 = negative review, 1 = positive review\n",
     "\n",
     "# Inspect the token-id sequence of one sample\n",
     "print(f\"一个样本的词索引序列: {x_train[0][:100]}...\")\n",
     "\n",
     "# Fetch the word->index mapping: a dict whose keys are words and values are raw indices\n",
     "word_index = imdb.get_word_index()\n",
     "\n",
     "word_index = {word: idx + 3 for word, idx in word_index.items()}  # shift by 3 so ids 0-3 stay free for special tokens, matching index_from=3 above\n",
     "word_index.update({\n",
     "    \"[PAD]\": 0,  # padding token\n",
     "    \"[BOS]\": 1,  # begin-of-sentence token\n",
     "    \"[UNK]\": 2,  # unknown / out-of-vocabulary token\n",
     "    \"[EOS]\": 3,  # end-of-sentence token\n",
     "})\n",
     "# Build the inverse mapping: index -> word\n",
     "reverse_word_index = {i: word for word, i in word_index.items()}\n",
     "\n",
     "# Convert one sample's id sequence back to text\n",
     "def decode_review(indices):\n",
     "    \"\"\"Join the words for a sequence of token ids; unknown ids render as '?'.\"\"\"\n",
     "    return ' '.join([reverse_word_index.get(i, '?') for i in indices])\n",
     "\n",
     "print(\"\\n解码后的样本文本:\")\n",
     "print(decode_review(x_train[0]))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7f05dccc",
   "metadata": {},
   "outputs": [],
   "source": [
     "# With index_from=3 real words start at id 4; presumably the most frequent word — verify the output\n",
     "reverse_word_index[4]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e0159de3",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Shifted id of 'the' in the vocabulary (raw index + 3)\n",
     "word_index['the']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8c7cfacb",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Peek at the raw token ids of the second training sample\n",
     "x_train[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3ab8ed72",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将测试集划分为验证集和测试集\n",
    "# 从原始测试集中取前10000个样本作为验证集\n",
    "x_val = x_test[:10000]\n",
    "y_val = y_test[:10000]\n",
    "\n",
    "# 剩余的15000个样本作为测试集\n",
    "x_test = x_test[10000:]\n",
    "y_test = y_test[10000:]\n",
    "\n",
    "print(f\"验证集样本数: {len(x_val)}\")\n",
    "print(f\"测试集样本数: {len(x_test)}\")\n",
    "print(f\"验证集标签示例: {y_val[:10]}\")\n",
    "print(f\"测试集标签示例: {y_test[:10]}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "272d907e",
   "metadata": {},
   "outputs": [],
   "source": [
    "raw_text = [\"hello world\".split(), \"tokenize text datas with batch\".split(), \"this is a test\".split()]\n",
    "raw_text"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f2df0a9f",
   "metadata": {},
   "source": [
    "# 通过直方图来观察样本长度分布"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "881921a0",
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# 计算每个样本的长度\n",
    "train_lengths = [len(x) for x in x_train]\n",
    "\n",
    "# 绘制直方图\n",
    "plt.figure(figsize=(10, 6))\n",
    "plt.hist(train_lengths, bins=50, alpha=0.7, color='blue')\n",
    "plt.xlabel('Sample Length')\n",
    "plt.ylabel('Sample Count')\n",
    "plt.title('Training Set Sample Length Distribution')\n",
    "plt.grid(True, linestyle='--', alpha=0.7)\n",
    "\n",
    "# 计算一些统计信息\n",
    "max_length = max(train_lengths)\n",
    "min_length = min(train_lengths)\n",
    "avg_length = 500 #自定义了一个长度\n",
    "\n",
    "# 在图上显示统计信息\n",
    "plt.axvline(x=avg_length, color='r', linestyle='--', label=f'Average Length: {avg_length:.1f}')\n",
    "plt.text(max_length*0.7, plt.ylim()[1]*0.9, f'Max Length: {max_length}')\n",
    "plt.text(max_length*0.7, plt.ylim()[1]*0.85, f'Min Length: {min_length}')\n",
    "plt.text(max_length*0.7, plt.ylim()[1]*0.8, f'Average Length: {avg_length:.1f}')\n",
    "plt.legend()\n",
    "\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "72202119",
   "metadata": {},
   "source": [
    "# Tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1f1980b6",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "class Tokenizer:\n",
    "    def __init__(self, word_index, reverse_word_index):\n",
    "        self.word_index = word_index\n",
    "        self.reverse_word_index = reverse_word_index\n",
    "        self.pad_token = 0  # <PAD>\n",
    "        self.start_token = 1  # <START>\n",
    "        self.unk_token = 2  # <UNK>\n",
    "        self.end_token = 3  # <END>\n",
    "    \n",
    "    def encode(self, texts, maxlen=None, padding='post', truncating='post', add_start=False, add_end=False):\n",
    "        \"\"\"\n",
    "        将文本序列转换为数字序列\n",
    "        \n",
    "        参数:\n",
    "        - texts: 文本列表，每个元素是一个词列表\n",
    "        - maxlen: 序列最大长度，如果为None则使用最长序列的长度\n",
    "        - padding: 'pre'或'post'，表示在序列前或后填充\n",
    "        - truncating: 'pre'或'post'，表示从序列前或后截断\n",
    "        - add_start: 是否添加开始标记\n",
    "        - add_end: 是否添加结束标记\n",
    "        \n",
    "        返回:\n",
    "        - 编码后的序列\n",
    "        \"\"\"\n",
    "        result = [] #编码后的序列，存储整个batch的序列\n",
    "        \n",
    "        # 计算需要的序列长度\n",
    "        batch_max_len = max([len(seq) for seq in texts]) #batch内最长序列长度\n",
    "        if add_start:\n",
    "            batch_max_len += 1\n",
    "        if add_end:\n",
    "            batch_max_len += 1\n",
    "            \n",
    "        # 如果maxlen为None或者batch内最大长度小于maxlen，使用batch内最大长度\n",
    "        if maxlen is None or batch_max_len < maxlen:\n",
    "            maxlen = batch_max_len\n",
    "        \n",
    "        for text in texts:\n",
    "            sequence = []\n",
    "            \n",
    "            # 添加开始标记\n",
    "            if add_start:\n",
    "                sequence.append(self.start_token)\n",
    "            \n",
    "            # 将词转换为索引\n",
    "            for word in text:\n",
    "                sequence.append(self.word_index.get(word, self.unk_token))  \n",
    "            \n",
    "            # 添加结束标记\n",
    "            if add_end:\n",
    "                sequence.append(self.end_token)\n",
    "            \n",
    "            # 截断序列\n",
    "            if len(sequence) > maxlen:\n",
    "                if truncating == 'pre':\n",
    "                    sequence = sequence[-maxlen:]\n",
    "                else:  # truncating == 'post'\n",
    "                    sequence = sequence[:maxlen]\n",
    "            \n",
    "            # 填充序列\n",
    "            pad_length = maxlen - len(sequence)\n",
    "            if pad_length > 0:\n",
    "                if padding == 'pre':\n",
    "                    sequence = [self.pad_token] * pad_length + sequence\n",
    "                else:  # padding == 'post'\n",
    "                    sequence = sequence + [self.pad_token] * pad_length\n",
    "            \n",
    "            result.append(sequence)\n",
    "        \n",
    "        return np.array(result)\n",
    "    \n",
    "    def decode(self, sequences):\n",
    "        \"\"\"\n",
    "        将数字序列转换回文本\n",
    "        \n",
    "        参数:\n",
    "        - sequences: 数字序列列表\n",
    "        \n",
    "        返回:\n",
    "        - 解码后的文本列表\n",
    "        \"\"\"\n",
    "        result = []\n",
    "        for sequence in sequences:\n",
    "            words = []\n",
    "            for idx in sequence:\n",
    "                if idx == self.pad_token:\n",
    "                    continue  # 跳过填充标记\n",
    "                word = self.reverse_word_index.get(idx, '?')\n",
    "                # if word not in ['[PAD]', '[BOS]', '[UNK]', '[EOS]']:\n",
    "                words.append(word)\n",
    "            result.append(' '.join(words))\n",
    "        return result\n",
     "\n",
     "# Create a Tokenizer instance from the IMDB vocabulary\n",
     "tokenizer = Tokenizer(word_index, reverse_word_index)\n",
     "\n",
     "# Try encoding the toy corpus\n",
     "encoded = tokenizer.encode(raw_text, maxlen=500, padding='post', add_start=True, add_end=True)\n",
     "print(\"编码后的序列:\")\n",
     "print(encoded)\n",
     "\n",
     "# Round-trip: decode the ids back to text\n",
     "decoded = tokenizer.decode(encoded)\n",
     "print(\"\\n解码后的文本:\")\n",
     "print(decoded)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1f2cddf5",
   "metadata": {},
   "source": [
    "# Dataset和DataLoader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c207da73",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch.utils.data import Dataset, DataLoader, random_split\n",
    "\n",
    "class TextDataset(Dataset):\n",
    "    \"\"\"\n",
    "    文本数据集类\n",
    "    \"\"\"\n",
    "    def __init__(self, sequences, tokenizer, labels=None):\n",
    "        \"\"\"\n",
    "        初始化文本数据集\n",
    "        \n",
    "        参数:\n",
    "        - sequences: 编码后的序列\n",
    "        - tokenizer: 分词器实例\n",
    "        - labels: 标签（可选）\n",
    "        \"\"\"\n",
    "        # 将编码序列解码为文本\n",
    "        self.texts = tokenizer.decode(sequences)\n",
    "        self.tokenizer = tokenizer\n",
    "        self.labels = labels\n",
    "    \n",
    "    def __len__(self):\n",
    "        \"\"\"\n",
    "        返回数据集大小\n",
    "        \"\"\"\n",
    "        return len(self.texts)\n",
    "    \n",
    "    def __getitem__(self, idx):\n",
    "        \"\"\"\n",
    "        获取指定索引的样本\n",
    "        \n",
    "        参数:\n",
    "        - idx: 索引\n",
    "        \n",
    "        返回:\n",
    "        - 样本（文本和标签，如果有的话）\n",
    "        \"\"\"\n",
    "        if self.labels is not None:\n",
    "            return self.texts[idx], self.labels[idx]\n",
    "        return self.texts[idx]\n",
    "\n",
    "def collate_fn(batch, tokenizer, maxlen=500):\n",
    "    \"\"\"\n",
    "    自定义批处理函数，用于在加载数据时进行编码\n",
    "    \n",
    "    参数:\n",
    "    - batch: 批次数据\n",
    "    - tokenizer: 分词器实例\n",
    "    - maxlen: 最大序列长度\n",
    "    \n",
    "    返回:\n",
    "    - 编码后的序列和标签（如果有的话）\n",
    "    \"\"\"\n",
    "    if isinstance(batch[0], tuple):\n",
    "        # print(batch)\n",
    "        # 如果批次包含标签\n",
    "        text_list = [item[0].split() for item in batch]  #batch是128样本，每个样本类型是元组，第一个元素是文本，第二个元素是标签\n",
    "        label_list = [item[1] for item in batch]\n",
    "        # print(text_list)\n",
    "        encoded = tokenizer.encode(text_list, maxlen=maxlen, padding='pre', add_start=False, add_end=True)\n",
    "        sequences = torch.tensor(encoded, dtype=torch.long)\n",
    "        labels = torch.tensor(label_list, dtype=torch.float).view(-1, 1)  # 将标签reshape为二维 [batch_size, 1]\n",
    "        return sequences, labels\n",
    "    else:\n",
    "        # 如果批次只有文本\n",
    "        text_list = [item.split() for item in batch]\n",
    "        encoded = tokenizer.encode(text_list, maxlen=maxlen, padding='pre', add_start=False, add_end=True)\n",
    "        sequences = torch.tensor(encoded, dtype=torch.long)\n",
    "        return sequences\n",
    "\n",
     "# Example: build the datasets and data loaders\n",
     "# One dataset per split (train / validation / test)\n",
     "train_dataset = TextDataset(x_train, tokenizer, y_train)\n",
     "val_dataset = TextDataset(x_val, tokenizer, y_val)\n",
     "test_dataset = TextDataset(x_test, tokenizer, y_test)\n",
     "\n",
     "# Data loaders; the lambda binds our tokenizer into collate_fn so each batch\n",
     "# is encoded on the fly (NOTE(review): lambdas are not picklable — confirm\n",
     "# before ever enabling multi-process workers here)\n",
     "batch_size = 128\n",
     "train_dataloader = DataLoader(\n",
     "    train_dataset, \n",
     "    batch_size=batch_size, \n",
     "    shuffle=True,\n",
     "    collate_fn=lambda batch: collate_fn(batch, tokenizer)\n",
     ")\n",
     "val_dataloader = DataLoader(\n",
     "    val_dataset, \n",
     "    batch_size=batch_size, \n",
     "    shuffle=False,\n",
     "    collate_fn=lambda batch: collate_fn(batch, tokenizer)\n",
     ")\n",
     "test_dataloader = DataLoader(\n",
     "    test_dataset, \n",
     "    batch_size=batch_size, \n",
     "    shuffle=False,\n",
     "    collate_fn=lambda batch: collate_fn(batch, tokenizer)\n",
     ")\n",
     "\n",
     "# Print dataset sizes\n",
     "print(f\"训练集大小: {len(train_dataset)}\")\n",
     "print(f\"验证集大小: {len(val_dataset)}\")\n",
     "print(f\"测试集大小: {len(test_dataset)}\")\n",
     "\n",
     "# Slicing works because __getitem__ passes the slice straight to the underlying containers\n",
     "train_dataset[0:3]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c581e1ec",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Example: pull one batch from the training DataLoader and inspect its shapes\n",
     "print(\"\\n训练数据加载器示例:\")\n",
     "for i, (batch_sequences, batch_labels) in enumerate(train_dataloader):\n",
     "    print(f\"批次 {i+1}:\")\n",
     "    print(f\"序列形状: {batch_sequences.shape}\")\n",
     "    print(f\"标签形状: {batch_labels.shape}\")\n",
     "    if i == 0:  # only show the first batch\n",
     "        break\n",
     "batch_sequences"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3fc9a057",
   "metadata": {},
   "source": [
    "# 搭建模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d445876c",
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch import nn\n",
    "m = nn.AdaptiveAvgPool1d(1)  #输出形状为[batch_size, embedding_dim, 1],把最后一个维度size变成1\n",
    "input = torch.randn(1, 64, 8) \n",
    "output = m(input)\n",
    "output.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9693b445",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "class SentimentClassifier(nn.Module):\n",
    "    def __init__(self, vocab_size, embedding_dim=16, hidden_dim=64, output_dim=1, \n",
    "                 rnn_layers=1, bidirectional=False, dropout_rate=0.3):\n",
    "        super().__init__()\n",
    "        \n",
    "        # 嵌入层\n",
    "        self.embedding = nn.Embedding(vocab_size, embedding_dim)\n",
    "        \n",
    "        # 设置RNN层\n",
    "        self.rnn = nn.RNN(embedding_dim, hidden_dim, \n",
    "                         num_layers=rnn_layers, # 层数\n",
    "                         bidirectional=bidirectional, # 是否双向\n",
    "                         batch_first=True,\n",
    "                         dropout=dropout_rate if rnn_layers > 1 else 0)\n",
    "        \n",
    "        # 确定输出维度\n",
    "        fc_input_dim = hidden_dim * 2 if bidirectional else hidden_dim\n",
    "        \n",
    "        # 增加一个fc层，输入是fc_input_dim，输出是hidden_dim\n",
    "        self.fc1 = nn.Linear(fc_input_dim, hidden_dim)\n",
    "        \n",
    "        # 全连接层\n",
    "        self.fc = nn.Linear(hidden_dim, output_dim)\n",
    "        \n",
    "        # Dropout层，防止过拟合\n",
    "        self.dropout = nn.Dropout(dropout_rate)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        # x形状: [batch_size, seq_len]\n",
    "        \n",
    "        # 通过嵌入层\n",
    "        embedded = self.embedding(x)  # [batch_size, seq_len, embedding_dim]\n",
    "        \n",
    "        # 通过RNN层\n",
    "        outputs, hidden = self.rnn(embedded)\n",
    "        \n",
    "        # print(f'outputs.shape: {outputs.shape}')\n",
    "        # print(f'hidden.shape: {hidden.shape}')\n",
    "        \n",
    "        # 取最后一个时间步的输出 (这也是为什么要设置padding_first=True的原因)\n",
    "        x = outputs[:, -1, :]\n",
    "        # # 打印一下形状，观察数据\n",
    "        # print(f'x.shape: {x.shape}')\n",
    "        # print(f'hidden.shape: {hidden.shape}')\n",
    "        \n",
    "\n",
    "        # hidden_reshaped = hidden.squeeze(0) \n",
    "        # x_reshaped = x\n",
    "\n",
    "            \n",
    "        # # 判断两个张量是否相等\n",
    "        # is_equal = torch.allclose(x_reshaped, hidden_reshaped)\n",
    "        # print(f'outputs的最后一个时间步与hidden是否相等: {is_equal}')\n",
    "        \n",
    "        # 应用dropout\n",
    "        x = self.dropout(x)\n",
    "        x = self.fc1(x)\n",
    "        x = self.fc(x)\n",
    "        \n",
    "        return x\n",
    "\n",
     "# Initialize the model\n",
     "model = SentimentClassifier(vocab_size, rnn_layers=1, bidirectional=False)\n",
     "print(f\"模型结构:\\n{model}\")\n",
     "\n",
     "# Count trainable parameters\n",
     "def count_parameters(model):\n",
     "    \"\"\"Return the number of trainable parameters in `model`.\"\"\"\n",
     "    return sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
     "\n",
     "print(f\"单层单向RNN模型参数数量: {count_parameters(model):,}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f9f4f8f8",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Helper that prints the parameter tensor shape of every layer in a model\n",
     "def print_model_layers_shapes(model):\n",
     "    \"\"\"Print the shape of each named parameter in `model`.\"\"\"\n",
     "    print(\"模型各层参数尺寸:\")\n",
     "    for name, param in model.named_parameters():\n",
     "        print(f\"{name}: {param.shape}\")\n",
     "\n",
     "# Inspect the current model's parameter shapes\n",
     "print_model_layers_shapes(model)\n",
     "\n",
     "\n",
     "# Detailed shapes for the RNN layer only\n",
     "print(f\"\\nRNN层详细信息:\")\n",
     "for name, param in model.rnn.named_parameters():\n",
     "    print(f\"- {name}: {param.shape}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5f0b3c05",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 初始化双向RNN模型\n",
    "bidirectional_model = SentimentClassifier(vocab_size, rnn_layers=1, bidirectional=True)\n",
    "print(f\"双向RNN模型结构:\\n{bidirectional_model}\")\n",
    "\n",
    "# 计算双向RNN模型参数数量\n",
    "print(f\"双层双向RNN模型参数数量: {count_parameters(bidirectional_model):,}\")\n",
    "\n",
    "\n",
    "# 对双向RNN层参数尺寸的详细解释\n",
    "print(f\"\\n双向RNN层详细信息:\")\n",
    "for name, param in bidirectional_model.rnn.named_parameters():\n",
    "    print(f\"- {name}: {param.shape}\")\n",
    "    \n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8e15cf57",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Initialize a two-layer unidirectional RNN model\n",
     "two_layer_model = SentimentClassifier(vocab_size, rnn_layers=2, bidirectional=False)\n",
     "print(f\"双层单向RNN模型结构:\\n{two_layer_model}\")\n",
     "\n",
     "# Count the two-layer unidirectional model's parameters\n",
     "print(f\"双层单向RNN模型参数数量: {count_parameters(two_layer_model):,}\")\n",
     "\n",
     "# Detailed shapes of the stacked RNN layers' parameters\n",
     "print(f\"\\n双层单向RNN层详细信息:\")\n",
     "for name, param in two_layer_model.rnn.named_parameters():\n",
     "    print(f\"- {name}: {param.shape}\")\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "19962cc7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 设置设备\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(f\"使用设备: {device}\")\n",
    "# 验证模型的前向计算\n",
    "# 从训练数据中获取一个批次的样本\n",
    "sample_batch, sample_labels = next(iter(train_dataloader))\n",
    "print(f\"输入形状: {sample_batch.shape}\")\n",
    "\n",
    "# 将样本移动到设备上\n",
    "sample_batch = sample_batch.to(device)\n",
    "\n",
    "# 进行前向计算\n",
    "with torch.no_grad():\n",
    "    outputs = bidirectional_model(sample_batch)\n",
    "    print(f\"输出形状: {outputs.shape}\")\n",
    "    \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b2be0bd2",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "f6b527a4",
   "metadata": {},
   "source": [
    "# 训练，画图，评估"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f63a223",
   "metadata": {},
   "outputs": [],
   "source": [
    "from wangdao_deeplearning_train import train_two_classification_model, evaluate_two_classification_model,plot_learning_curves,EarlyStopping,ModelSaver\n",
    "\n",
    "model = SentimentClassifier(vocab_size, rnn_layers=2, bidirectional=False)\n",
    "print(f\"模型结构:\\n{model}\")\n",
    "\n",
    "# 定义损失函数和优化器\n",
    "criterion = nn.BCEWithLogitsLoss() #WithLogitsLoss代表的含义是：把输出结果通过sigmoid函数，然后计算损失\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n",
    "# 将模型移动到设备上\n",
    "model = model.to(device)\n",
    "\n",
    "\n",
    "# 训练参数\n",
    "num_epochs = 20\n",
    "eval_step = 100\n",
    "\n",
    "# 训练模型\n",
    "# 创建早停和模型保存器\n",
    "early_stopping = EarlyStopping(patience=5, delta=0.001)\n",
    "model_saver = ModelSaver(save_dir='weights')\n",
    "\n",
    "model, record_dict = train_two_classification_model(\n",
    "    model=model,\n",
    "    train_loader=train_dataloader,\n",
    "    val_loader=val_dataloader,\n",
    "    criterion=criterion,\n",
    "    optimizer=optimizer,\n",
    "    device=device,\n",
    "    num_epochs=num_epochs,\n",
    "    eval_step=eval_step,\n",
    "    early_stopping=early_stopping,\n",
    "    model_saver=model_saver\n",
    ")\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0fb25282",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Plot the learning curves recorded during training\n",
     "plot_learning_curves(record_dict,sample_step=100)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "55a18ae6",
   "metadata": {},
   "outputs": [],
   "source": [
     "\n",
     "\n",
     "# Evaluate the final model on the held-out test set\n",
     "test_acc, test_loss = evaluate_two_classification_model(model, test_dataloader, device, criterion)\n",
     "print(f\"测试集准确率: {test_acc:.2f}%, 测试集损失: {test_loss:.4f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "255c37e9",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
