{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "47330f52-7d3e-41d9-a49d-f5fd2b4e03f6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "IMDB数据集已下载并保存到本地\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import re\n",
    "import jieba\n",
    "from tensorflow.keras.preprocessing.text import Tokenizer\n",
    "from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
    "from tensorflow.keras.models import Model\n",
    "from tensorflow.keras.layers import Input, Embedding, Conv1D, GlobalMaxPooling1D, Dense, Dropout, LSTM\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.utils import to_categorical\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n",
    "from tensorflow.keras.datasets import imdb\n",
    "\n",
    "# Text preprocessing helper\n",
    "def preprocess_text(text):\n",
    "    \"\"\"Strip punctuation from `text` and segment it with jieba (aimed at Chinese input).\n",
    "\n",
    "    Returns a single space-joined string of tokens.\n",
    "    \"\"\"\n",
    "    # Drop everything except word characters and whitespace\n",
    "    text = re.sub(r'[^\\w\\s]', '', text)\n",
    "    # jieba word segmentation (a near no-op for plain ASCII tokens)\n",
    "    words = jieba.lcut(text)\n",
    "    return ' '.join(words)\n",
    "\n",
    "# Download the IMDB dataset and persist it locally as CSV\n",
    "def download_and_save_imdb(num_words=10000):\n",
    "    \"\"\"Fetch the Keras IMDB dataset and save the train/test splits as CSV.\n",
    "\n",
    "    Each review is stored as a space-separated string of word indices so the\n",
    "    CSV round-trips cleanly; saving the raw Python lists would write their\n",
    "    repr (e.g. \"[1, 14, 22]\"), which is awkward to parse back.\n",
    "\n",
    "    num_words: vocabulary cap passed through to `imdb.load_data` (default 10000,\n",
    "    matching the original behavior).\n",
    "    \"\"\"\n",
    "    (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=num_words)\n",
    "    # Join each integer sequence into a plain string before saving\n",
    "    def to_text(seq):\n",
    "        return ' '.join(map(str, seq))\n",
    "    train_data = pd.DataFrame({'text': [to_text(s) for s in x_train], 'label': y_train})\n",
    "    test_data = pd.DataFrame({'text': [to_text(s) for s in x_test], 'label': y_test})\n",
    "    train_data.to_csv('imdb_train.csv', index=False)\n",
    "    test_data.to_csv('imdb_test.csv', index=False)\n",
    "    print(\"IMDB数据集已下载并保存到本地\")\n",
    "\n",
    "# Download and save the IMDB dataset\n",
    "download_and_save_imdb()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "7ce073d5-1a03-40c1-879d-d8b9c91aa4a6",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-05-26 11:52:50.325345: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:185] None of the MLIR Optimization Passes are enabled (registered 2)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "282/282 [==============================] - 5s 17ms/step - loss: 0.6923 - accuracy: 0.5235 - val_loss: 0.6880 - val_accuracy: 0.5080\n",
      "Epoch 2/10\n",
      "282/282 [==============================] - 5s 16ms/step - loss: 0.6847 - accuracy: 0.5513 - val_loss: 0.6751 - val_accuracy: 0.5950\n",
      "Epoch 3/10\n",
      "282/282 [==============================] - 4s 16ms/step - loss: 0.6763 - accuracy: 0.5749 - val_loss: 0.6629 - val_accuracy: 0.6245\n",
      "Epoch 4/10\n",
      "282/282 [==============================] - 4s 16ms/step - loss: 0.6642 - accuracy: 0.5981 - val_loss: 0.6517 - val_accuracy: 0.6215\n",
      "Epoch 5/10\n",
      "282/282 [==============================] - 4s 16ms/step - loss: 0.6567 - accuracy: 0.6058 - val_loss: 0.6441 - val_accuracy: 0.6435\n",
      "Epoch 6/10\n",
      "282/282 [==============================] - 4s 16ms/step - loss: 0.6519 - accuracy: 0.6182 - val_loss: 0.6619 - val_accuracy: 0.5885\n",
      "Epoch 7/10\n",
      "282/282 [==============================] - 4s 16ms/step - loss: 0.6477 - accuracy: 0.6214 - val_loss: 0.6358 - val_accuracy: 0.6450\n",
      "Epoch 8/10\n",
      "282/282 [==============================] - 4s 16ms/step - loss: 0.6484 - accuracy: 0.6224 - val_loss: 0.6373 - val_accuracy: 0.6450\n",
      "Epoch 9/10\n",
      "282/282 [==============================] - 4s 15ms/step - loss: 0.6473 - accuracy: 0.6222 - val_loss: 0.6358 - val_accuracy: 0.6380\n",
      "Epoch 10/10\n",
      "282/282 [==============================] - 4s 16ms/step - loss: 0.6452 - accuracy: 0.6271 - val_loss: 0.6326 - val_accuracy: 0.6500\n",
      "Accuracy: 0.648\n",
      "Precision: 0.6478957847007825\n",
      "Recall: 0.648\n",
      "F1 Score: 0.6479064056365487\n"
     ]
    }
   ],
   "source": [
    "# Load the locally saved dataset\n",
    "data = pd.read_csv('imdb_train.csv')\n",
    "# Each 'text' entry is a string of word indices — either \"1 14 22\" or the\n",
    "# list repr \"[1, 14, 22]\" depending on how the CSV was written.  Extract the\n",
    "# digit runs explicitly: the previous ' '.join(map(str, x)) iterated over the\n",
    "# STRING character by character, shredding every index into single digits and\n",
    "# brackets (and the jieba pass on that debris was meaningless), which collapses\n",
    "# the vocabulary to a handful of digit tokens and cripples the classifier.\n",
    "data['text'] = data['text'].apply(lambda x: ' '.join(re.findall(r'\\d+', str(x))))\n",
    "texts = data['text'].tolist()\n",
    "labels = data['label'].tolist()\n",
    "\n",
    "# Text vectorization\n",
    "max_words = 10000  # vocabulary size\n",
    "max_len = 200  # maximum sequence length\n",
    "\n",
    "tokenizer = Tokenizer(num_words=max_words)\n",
    "tokenizer.fit_on_texts(texts)\n",
    "sequences = tokenizer.texts_to_sequences(texts)\n",
    "word_index = tokenizer.word_index\n",
    "\n",
    "X = pad_sequences(sequences, maxlen=max_len)\n",
    "y = to_categorical(labels)  # one-hot encode the labels\n",
    "\n",
    "# Train/test split\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
    "\n",
    "# Model definition\n",
    "def build_model():\n",
    "    \"\"\"Build a CNN text classifier: Embedding -> Conv1D -> GlobalMaxPool -> Dense.\"\"\"\n",
    "    inputs = Input(shape=(max_len,))\n",
    "    # Embedding layer\n",
    "    x = Embedding(input_dim=max_words, output_dim=128)(inputs)\n",
    "    # Convolutional feature extractor\n",
    "    x = Conv1D(filters=128, kernel_size=3, activation='relu')(x)\n",
    "    x = GlobalMaxPooling1D()(x)\n",
    "    x = Dropout(0.5)(x)  # regularization before the classifier head\n",
    "    outputs = Dense(2, activation='softmax')(x)\n",
    "    model = Model(inputs=inputs, outputs=outputs)\n",
    "    return model\n",
    "\n",
    "model = build_model()\n",
    "model.compile(optimizer=Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])\n",
    "\n",
    "# Training\n",
    "batch_size = 64\n",
    "epochs = 10\n",
    "history = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)\n",
    "\n",
    "# Evaluation on the held-out split\n",
    "y_pred = model.predict(X_test)\n",
    "y_pred_class = np.argmax(y_pred, axis=1)\n",
    "y_test_class = np.argmax(y_test, axis=1)\n",
    "\n",
    "print(\"Accuracy:\", accuracy_score(y_test_class, y_pred_class))\n",
    "print(\"Precision:\", precision_score(y_test_class, y_pred_class, average='weighted'))\n",
    "print(\"Recall:\", recall_score(y_test_class, y_pred_class, average='weighted'))\n",
    "print(\"F1 Score:\", f1_score(y_test_class, y_pred_class, average='weighted'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dce628e9-e2f3-4daf-a64c-82facefeb6f8",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
