{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3abf9749-b216-4ced-87c4-5ca9dea47221",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "# NOTE: the torchtext.legacy namespace exists only in torchtext 0.9-0.11 and\n",
    "# was removed in 0.12 — pin torchtext accordingly (the previously stored\n",
    "# traceback showed torchtext was not installed at all).\n",
    "from torchtext.legacy import data\n",
    "from torchtext.legacy import datasets\n",
    "import nltk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "904a605b-64b7-4a6a-b025-9305237c781a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fetch the NLTK 'punkt' tokenizer models (no-op if already downloaded).\n",
    "nltk.download('punkt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "df806ce4-5d2c-413b-8ecd-f0611174ad11",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Field processors: spaCy tokenisation for the text, float labels for BCE.\n",
    "# include_lengths=True makes batch.text a (tokens, lengths) pair, which the\n",
    "# training loop unpacks — without it the unpack raises at runtime.\n",
    "TEXT = data.Field(tokenize='spacy', tokenizer_language='en_core_web_sm', include_lengths=True)\n",
    "LABEL = data.LabelField(dtype=torch.float)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "82d712bf-89e2-473c-8c38-2479f70804c3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the IMDB sentiment dataset with torchtext's built-in IMDB loader\n",
    "# (the previous comment mentioned TabularDataset, which is not used here).\n",
    "train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "77b64f8f-1340-4c06-af99-3d528a7375f3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: printing the Dataset object itself only shows an opaque\n",
    "# repr, so report the example counts instead.\n",
    "print(f'Training examples: {len(train_data)}')\n",
    "print(f'Test examples: {len(test_data)}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d9107a82-e5a6-48a9-a159-ccdf9aa0f4db",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build vocabularies: cap vocab size, attach pretrained GloVe vectors, and\n",
    "# initialise out-of-vocabulary embeddings from a normal distribution.\n",
    "TEXT.build_vocab(train_data, max_size=25000, vectors=\"glove.6B.100d\", unk_init=torch.Tensor.normal_)\n",
    "LABEL.build_vocab(train_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b1f3e6a0-57c4-4aaa-9bbb-0d2f1c3e4a51",
   "metadata": {},
   "outputs": [],
   "source": [
    "class LSTM(nn.Module):\n",
    "    \"\"\"Sentiment classifier: embedding -> multi-layer LSTM -> linear head.\n",
    "\n",
    "    Expects sequence-first input ([seq_len, batch]), which is what the\n",
    "    torchtext BucketIterator produces with the default Field layout.\n",
    "    The sentence logit is taken from the LSTM output at the final timestep.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, n_layers, drop_prob=0.2):\n",
    "        super(LSTM, self).__init__()\n",
    "        self.hidden_dim = hidden_dim\n",
    "        self.n_layers = n_layers\n",
    "        self.embedding = nn.Embedding(input_dim, embedding_dim)\n",
    "        # Sequence-first (batch_first=False) to match the iterator's output;\n",
    "        # the original used batch_first=True while feeding [seq, batch] data.\n",
    "        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=drop_prob)\n",
    "        self.dropout = nn.Dropout(drop_prob)\n",
    "        self.fc = nn.Linear(hidden_dim, output_dim)\n",
    "\n",
    "    def forward(self, x, hidden):\n",
    "        # x: [seq_len, batch] token indices\n",
    "        embedded = self.embedding(x)                    # [seq_len, batch, emb]\n",
    "        lstm_out, hidden = self.lstm(embedded, hidden)  # [seq_len, batch, hidden]\n",
    "        # Classify from the final timestep only, so the output is\n",
    "        # [batch, output_dim] and matches the [batch] label tensor (the\n",
    "        # original flattened every timestep, breaking the loss shapes).\n",
    "        out = self.fc(self.dropout(lstm_out[-1]))\n",
    "        return out, hidden\n",
    "\n",
    "    def init_hidden(self, batch_size):\n",
    "        # weight.new(...) allocates on the parameters' device/dtype, so no\n",
    "        # global `device` reference is needed here.\n",
    "        weight = next(self.parameters()).data\n",
    "        return (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),\n",
    "                weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c2a4f7b1-8d35-4bbb-8ccc-1e3f2a4b5c62",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyperparameters\n",
    "input_dim = len(TEXT.vocab)\n",
    "embedding_dim = 100  # must match the glove.6B.100d vectors\n",
    "hidden_dim = 256\n",
    "output_dim = 1\n",
    "n_layers = 2\n",
    "drop_prob = 0.5\n",
    "\n",
    "# Device selection (GPU when available) — defined once, used everywhere;\n",
    "# the original defined `device` twice.\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "model = LSTM(input_dim, embedding_dim, hidden_dim, output_dim, n_layers, drop_prob)\n",
    "# Initialise the embedding layer with the pretrained GloVe vectors that\n",
    "# build_vocab downloaded (the original built them but never used them).\n",
    "model.embedding.weight.data.copy_(TEXT.vocab.vectors)\n",
    "model = model.to(device)\n",
    "\n",
    "# Binary classification: BCE on raw logits, Adam optimiser.\n",
    "criterion = nn.BCEWithLogitsLoss()\n",
    "optimizer = optim.Adam(model.parameters(), lr=0.001)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d3b5a8c2-9e46-4ccc-8ddd-2f4a3b5c6d73",
   "metadata": {},
   "outputs": [],
   "source": [
    "def _batch_text(batch):\n",
    "    \"\"\"Return the token tensor from a torchtext batch.\n",
    "\n",
    "    Handles both Field settings: with include_lengths=True batch.text is a\n",
    "    (tokens, lengths) tuple, otherwise it is the token tensor itself.\n",
    "    \"\"\"\n",
    "    text = batch.text\n",
    "    return text[0] if isinstance(text, tuple) else text\n",
    "\n",
    "def train(model, iterator, optimizer, criterion):\n",
    "    \"\"\"Run one training epoch; return the mean batch loss.\"\"\"\n",
    "    model.train()\n",
    "    epoch_loss = 0\n",
    "    for batch in iterator:\n",
    "        optimizer.zero_grad()\n",
    "        text = _batch_text(batch).to(device)\n",
    "        predictions, _ = model(text, None)\n",
    "        # squeeze the output_dim axis: [batch, 1] -> [batch] to match labels\n",
    "        loss = criterion(predictions.squeeze(1), batch.label.float().to(device))\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        epoch_loss += loss.item()\n",
    "    return epoch_loss / len(iterator)\n",
    "\n",
    "def evaluate(model, iterator, criterion):\n",
    "    \"\"\"Evaluate without gradient tracking; return the mean batch loss.\"\"\"\n",
    "    model.eval()\n",
    "    epoch_loss = 0\n",
    "    with torch.no_grad():\n",
    "        for batch in iterator:\n",
    "            text = _batch_text(batch).to(device)\n",
    "            predictions, _ = model(text, None)\n",
    "            loss = criterion(predictions.squeeze(1), batch.label.float().to(device))\n",
    "            epoch_loss += loss.item()\n",
    "    return epoch_loss / len(iterator)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e4c6b9d3-af57-4ddd-8eee-3a5b4c6d7e84",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Bucketed iterators group similarly-sized examples to minimise padding.\n",
    "batch_size = 64\n",
    "train_iterator, test_iterator = data.BucketIterator.splits(\n",
    "    (train_data, test_data),\n",
    "    batch_size=batch_size,\n",
    "    device=device,\n",
    "    sort_key=lambda x: len(x.text),\n",
    "    sort_within_batch=False,\n",
    "    repeat=False\n",
    ")\n",
    "\n",
    "# Training loop: report train/test loss once per epoch.\n",
    "n_epochs = 10\n",
    "for epoch in range(n_epochs):\n",
    "    train_loss = train(model, train_iterator, optimizer, criterion)\n",
    "    test_loss = evaluate(model, test_iterator, criterion)\n",
    "    print(f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, Test Loss: {test_loss:.3f}')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
