{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7f46ed21-cc19-49ae-a72d-fb584c6969dc",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "import numpy as np\n",
    "from torchtext.legacy import data\n",
    "from torchtext.legacy import datasets\n",
    "\n",
    "# Select the compute device (GPU if available, otherwise CPU)\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "# Download and preprocess the IMDB dataset with TorchText.\n",
    "# BUGFIX: include_lengths=True makes batch.text a (tensor, lengths) pair,\n",
    "# which the train/evaluate loops unpack and feed to pack_padded_sequence.\n",
    "TEXT = data.Field(tokenize='spacy', tokenizer_language='en_core_web_sm', include_lengths=True)\n",
    "LABEL = data.LabelField(dtype=torch.float)  # sentiment label as float (0.0 or 1.0)\n",
    "\n",
    "train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)\n",
    "\n",
    "# Build the vocabulary: top 25k tokens, pre-trained GloVe vectors,\n",
    "# out-of-vocabulary words initialized from a normal distribution\n",
    "TEXT.build_vocab(train_data, max_size=25000, vectors=\"glove.6B.100d\", unk_init=torch.Tensor.normal_)\n",
    "LABEL.build_vocab(train_data)\n",
    "\n",
    "# Create batching iterators.\n",
    "# BUGFIX: sort_within_batch=True orders each batch by descending length,\n",
    "# as pack_padded_sequence requires with its default enforce_sorted=True.\n",
    "batch_size = 64\n",
    "\n",
    "train_iterator, test_iterator = data.BucketIterator.splits(\n",
    "    (train_data, test_data),\n",
    "    batch_size = batch_size,\n",
    "    sort_within_batch = True,\n",
    "    sort_key = lambda ex: len(ex.text),\n",
    "    device = device)\n",
    "  \n",
    "# RNN model definition\n",
    "class RNN(nn.Module):\n",
    "    \"\"\"LSTM-based binary sentiment classifier.\n",
    "\n",
    "    Args:\n",
    "        input_dim: vocabulary size.\n",
    "        embedding_dim: dimensionality of the token embeddings.\n",
    "        hidden_dim: LSTM hidden-state size.\n",
    "        output_dim: number of output logits (1 for binary classification).\n",
    "        dropout: dropout probability applied to the final hidden state\n",
    "            (new parameter with a default, so existing callers are unaffected).\n",
    "    \"\"\"\n",
    "    def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, dropout=0.5):\n",
    "        super().__init__()\n",
    "\n",
    "        self.embedding = nn.Embedding(input_dim, embedding_dim)\n",
    "        self.rnn = nn.LSTM(embedding_dim, hidden_dim)\n",
    "        self.fc = nn.Linear(hidden_dim, output_dim)\n",
    "        # BUGFIX: forward() used self.dropout, but it was never defined here,\n",
    "        # so every forward pass raised AttributeError.\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "\n",
    "    def forward(self, text, text_lengths):\n",
    "        # text: (seq_len, batch) token indices; text_lengths: (batch,) true lengths\n",
    "        embedded = self.embedding(text)\n",
    "        # Lengths must live on the CPU for pack_padded_sequence;\n",
    "        # enforce_sorted=False accepts batches in any length order.\n",
    "        packed_embedded = nn.utils.rnn.pack_padded_sequence(\n",
    "            embedded, text_lengths.cpu(), enforce_sorted=False)\n",
    "        packed_output, (hidden, cell) = self.rnn(packed_embedded)\n",
    "        output, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_output)\n",
    "\n",
    "        # Classify from the last layer's final hidden state: (batch, hidden_dim).\n",
    "        # BUGFIX: the previous squeeze(0) collapsed the batch dimension whenever\n",
    "        # batch size was 1, producing a wrongly shaped output.\n",
    "        hidden = self.dropout(hidden[-1])\n",
    "\n",
    "        return self.fc(hidden)\n",
    "  \n",
    "# Model hyperparameters  \n",
    "input_dim = len(TEXT.vocab)  \n",
    "embedding_dim = 100  \n",
    "hidden_dim = 256  \n",
    "output_dim = 1  \n",
    "  \n",
    "# Instantiate the model  \n",
    "model = RNN(input_dim, embedding_dim, hidden_dim, output_dim)  \n",
    "  \n",
    "# Initialize the embedding weights from the pre-trained GloVe vectors  \n",
    "pretrained_embeddings = TEXT.vocab.vectors  \n",
    "model.embedding.weight.data.copy_(pretrained_embeddings)  \n",
    "  \n",
    "# Optimizer and loss function (BCEWithLogitsLoss fuses sigmoid + BCE)  \n",
    "optimizer = torch.optim.Adam(model.parameters())  \n",
    "criterion = nn.BCEWithLogitsLoss()  \n",
    "  \n",
    "# Move model and loss to the selected device  \n",
    "model = model.to(device)  \n",
    "criterion = criterion.to(device)  \n",
    "  \n",
    "# One training epoch\n",
    "def train(model, iterator, optimizer, criterion):\n",
    "    \"\"\"Run a single training epoch and return the mean per-batch loss.\"\"\"\n",
    "    model.train()\n",
    "    running_loss = 0.0\n",
    "\n",
    "    for batch in iterator:\n",
    "        optimizer.zero_grad()\n",
    "        text, text_lengths = batch.text\n",
    "        logits = model(text, text_lengths).squeeze(1)\n",
    "        loss = criterion(logits, batch.label)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        running_loss += loss.item()\n",
    "\n",
    "    return running_loss / len(iterator)\n",
    "  \n",
    "# Evaluation: mean loss and accuracy over an iterator\n",
    "def evaluate(model, iterator, criterion=criterion):\n",
    "    \"\"\"Evaluate the model without gradient tracking.\n",
    "\n",
    "    Returns (mean per-batch loss, accuracy). The loss criterion was\n",
    "    previously read from a hidden global; it is now an explicit parameter\n",
    "    (defaulting to the module-level criterion, so existing calls still work),\n",
    "    matching train()'s signature.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    epoch_loss = 0\n",
    "    correct_predictions = 0\n",
    "    total_predictions = 0\n",
    "\n",
    "    with torch.no_grad():\n",
    "        for batch in iterator:\n",
    "            text, text_lengths = batch.text\n",
    "            predictions = model(text, text_lengths).squeeze(1)\n",
    "            # Threshold the sigmoid output at 0.5 for hard 0/1 predictions\n",
    "            rounded_predictions = torch.round(torch.sigmoid(predictions))\n",
    "            correct_predictions += (rounded_predictions == batch.label).sum().item()\n",
    "            total_predictions += batch.label.shape[0]\n",
    "            loss = criterion(predictions, batch.label)\n",
    "            epoch_loss += loss.item()\n",
    "\n",
    "    return epoch_loss / len(iterator), correct_predictions / total_predictions\n",
    "  \n",
    "# Train the model for N_EPOCHS epochs, reporting test metrics after each  \n",
    "N_EPOCHS = 5  \n",
    "  \n",
    "for epoch in range(N_EPOCHS):  \n",
    "    train_loss = train(model, train_iterator, optimizer, criterion)  \n",
    "    test_loss, test_acc = evaluate(model, test_iterator)  \n",
    "      \n",
    "    print(f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}')  \n",
    "  \n",
    "# Predict the sentiment of a raw sentence with the trained model\n",
    "def predict_sentiment(model, sentence):\n",
    "    \"\"\"Return the model's sigmoid score for a sentence.\n",
    "\n",
    "    NOTE(review): which class maps to 1.0 depends on LABEL.vocab —\n",
    "    check LABEL.vocab.stoi to confirm the positive/negative mapping.\n",
    "    \"\"\"\n",
    "    # BUGFIX: spacy was used here without ever being imported (NameError).\n",
    "    # Import locally and cache the pipeline so it is not reloaded per call.\n",
    "    import spacy\n",
    "    if not hasattr(predict_sentiment, '_nlp'):\n",
    "        predict_sentiment._nlp = spacy.load('en_core_web_sm')\n",
    "    tokenized = [tok.text for tok in predict_sentiment._nlp(sentence)]\n",
    "    indexed = [TEXT.vocab.stoi[t] for t in tokenized]\n",
    "    length = torch.tensor([len(indexed)])\n",
    "    # Shape (seq_len, 1): sequence dimension first, a batch of one\n",
    "    tensor = torch.LongTensor(indexed).to(device)\n",
    "    tensor = tensor.unsqueeze(1)\n",
    "    model.eval()\n",
    "    with torch.no_grad():  # inference only, no gradient tracking\n",
    "        prediction = torch.sigmoid(model(tensor, length))\n",
    "    return prediction.item()\n",
    "\n",
    "# Example prediction\n",
    "example_sentence = \"This movie was absolutely terrible!\"\n",
    "prediction = predict_sentiment(model, example_sentence)\n",
    "print(f\"Predicted sentiment for '{example_sentence}': {prediction:.2f}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
