{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 文本分类实例"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step1 导入相关包"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-06T06:05:49.975031Z",
     "start_time": "2025-09-06T06:05:45.607178Z"
    }
   },
   "source": [
    "from datasets import load_dataset\n",
    "from transformers import AutoTokenizer, AutoModelForSequenceClassification"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step2 加载数据集"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-06T06:05:53.405493Z",
     "start_time": "2025-09-06T06:05:52.437409Z"
    }
   },
   "source": [
    "# The CSV has no predefined splits, so load_dataset exposes everything as \"train\".\n",
    "dataset = load_dataset(\"csv\", data_files = \"./ChnSentiCorp_htl_all.csv\", split = \"train\")\n",
    "# Drop rows whose review text is missing; they would break tokenization later.\n",
    "dataset = dataset.filter(lambda x: x[\"review\"] is not None)\n",
    "dataset"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Dataset({\n",
       "    features: ['label', 'review'],\n",
       "    num_rows: 7765\n",
       "})"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 2
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step4 划分数据集"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-06T06:05:57.802750Z",
     "start_time": "2025-09-06T06:05:57.782557Z"
    }
   },
   "source": [
    "# Hold out 10% of the rows for validation. Fix the seed so the split is\n",
    "# reproducible: without it, Restart & Run All yields a different train/test\n",
    "# partition (and different metrics) on every run.\n",
    "datasets = dataset.train_test_split(test_size = 0.1, seed = 42)\n",
    "datasets"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['label', 'review'],\n",
       "        num_rows: 6988\n",
       "    })\n",
       "    test: Dataset({\n",
       "        features: ['label', 'review'],\n",
       "        num_rows: 777\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 3
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step5 创建Dataloader"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-06T06:06:19.459321Z",
     "start_time": "2025-09-06T06:06:15.482368Z"
    }
   },
   "source": [
    "import torch\n",
    "\n",
    "\n",
    "# Tokenizer matching the hfl/rbt3 checkpoint used for the model below.\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"hfl/rbt3\")\n",
    "\n",
    "\n",
    "def process_function(examples):\n",
    "\t\"\"\"Tokenize a batch of reviews and carry the label over as `labels`.\n",
    "\n",
    "\tTruncates to 128 tokens; padding is deferred to the DataCollatorWithPadding\n",
    "\tused by the DataLoaders, so each batch is padded only to its own longest\n",
    "\tsequence.\n",
    "\t\"\"\"\n",
    "\ttokenized_examples = tokenizer(examples[\"review\"], max_length = 128, truncation = True)\n",
    "\ttokenized_examples[\"labels\"] = examples[\"label\"]\n",
    "\treturn tokenized_examples\n",
    "\n",
    "\n",
    "# batched=True passes lists of examples to process_function; the original\n",
    "# columns are removed so only model inputs remain in the mapped datasets.\n",
    "tokenized_datasets = datasets.map(process_function, batched = True, remove_columns = datasets[\"train\"].column_names)\n",
    "tokenized_datasets"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Map:   0%|          | 0/6988 [00:00<?, ? examples/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "486e940d9540412492a7849262599a36"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "data": {
      "text/plain": [
       "Map:   0%|          | 0/777 [00:00<?, ? examples/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "1bbe0b582e924785aee4765bb82369b3"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['input_ids', 'token_type_ids', 'attention_mask', 'labels'],\n",
       "        num_rows: 6988\n",
       "    })\n",
       "    test: Dataset({\n",
       "        features: ['input_ids', 'token_type_ids', 'attention_mask', 'labels'],\n",
       "        num_rows: 777\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 4
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-06T06:06:23.192564Z",
     "start_time": "2025-09-06T06:06:22.676110Z"
    }
   },
   "source": [
    "from torch.utils.data import DataLoader\n",
    "from transformers import DataCollatorWithPadding\n",
    "\n",
    "\n",
    "# Dynamic padding: each batch is padded only to its own longest sequence.\n",
    "collator = DataCollatorWithPadding(tokenizer)\n",
    "\n",
    "trainset, validset = tokenized_datasets[\"train\"], tokenized_datasets[\"test\"]\n",
    "trainloader = DataLoader(trainset, batch_size = 32, shuffle = True, collate_fn = collator)\n",
    "validloader = DataLoader(validset, batch_size = 64, shuffle = False, collate_fn = collator)"
   ],
   "outputs": [],
   "execution_count": 5
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-06T06:06:26.953079Z",
     "start_time": "2025-09-06T06:06:26.937568Z"
    }
   },
   "source": [
    "# Peek at one collated batch to sanity-check padding and label shapes.\n",
    "# next(iter(...)) is the idiomatic form of next(enumerate(...))[1].\n",
    "next(iter(validloader))"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': tensor([[ 101, 2791, 7313,  ...,    0,    0,    0],\n",
       "        [ 101, 2791, 7313,  ...,    0,    0,    0],\n",
       "        [ 101, 6821,  702,  ...,    0,    0,    0],\n",
       "        ...,\n",
       "        [ 101,  122, 6983,  ..., 2523, 2345,  102],\n",
       "        [ 101, 6983, 2421,  ..., 1928, 7008,  102],\n",
       "        [ 101, 8142,  119,  ...,    0,    0,    0]]), 'token_type_ids': tensor([[0, 0, 0,  ..., 0, 0, 0],\n",
       "        [0, 0, 0,  ..., 0, 0, 0],\n",
       "        [0, 0, 0,  ..., 0, 0, 0],\n",
       "        ...,\n",
       "        [0, 0, 0,  ..., 0, 0, 0],\n",
       "        [0, 0, 0,  ..., 0, 0, 0],\n",
       "        [0, 0, 0,  ..., 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1,  ..., 0, 0, 0],\n",
       "        [1, 1, 1,  ..., 0, 0, 0],\n",
       "        [1, 1, 1,  ..., 0, 0, 0],\n",
       "        ...,\n",
       "        [1, 1, 1,  ..., 1, 1, 1],\n",
       "        [1, 1, 1,  ..., 1, 1, 1],\n",
       "        [1, 1, 1,  ..., 0, 0, 0]]), 'labels': tensor([1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,\n",
       "        0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1,\n",
       "        1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1])}"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 6
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step6 创建模型及优化器"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-06T06:06:32.209239Z",
     "start_time": "2025-09-06T06:06:29.338225Z"
    }
   },
   "source": [
    "from torch.optim import Adam\n",
    "\n",
    "\n",
    "# Loads the pretrained encoder and attaches a newly-initialized classification\n",
    "# head (see the stderr warning below); the 0/1 sentiment labels in this dataset\n",
    "# match the head's two output classes.\n",
    "model = AutoModelForSequenceClassification.from_pretrained(\"hfl/rbt3\")\n",
    "\n",
    "# Move to GPU when available; training/eval cells mirror this check per batch.\n",
    "if torch.cuda.is_available():\n",
    "\tmodel = model.cuda()"
   ],
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at hfl/rbt3 and are newly initialized: ['classifier.bias', 'classifier.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    }
   ],
   "execution_count": 7
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-06T06:06:36.566702Z",
     "start_time": "2025-09-06T06:06:35.286466Z"
    }
   },
   "source": [
    "# Adam over all model parameters; 2e-5 is a typical fine-tuning learning rate.\n",
    "optimizer = Adam(model.parameters(), lr = 2e-5)"
   ],
   "outputs": [],
   "execution_count": 8
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step7 训练与验证"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-06T06:06:40.985476Z",
     "start_time": "2025-09-06T06:06:40.972614Z"
    }
   },
   "source": [
    "def evaluate():\n",
    "\t\"\"\"Return accuracy of `model` over `validloader` (no gradient tracking).\"\"\"\n",
    "\tmodel.eval()\n",
    "\tcorrect = 0\n",
    "\twith torch.inference_mode():\n",
    "\t\tfor batch in validloader:\n",
    "\t\t\tif torch.cuda.is_available():\n",
    "\t\t\t\tbatch = {k: v.cuda() for k, v in batch.items()}\n",
    "\t\t\tlogits = model(**batch).logits\n",
    "\t\t\tpredictions = torch.argmax(logits, dim = -1)\n",
    "\t\t\tcorrect += (predictions.long() == batch[\"labels\"].long()).float().sum()\n",
    "\treturn correct / len(validset)\n",
    "\n",
    "\n",
    "def train(epoch = 3, log_step = 100):\n",
    "\t\"\"\"Fine-tune `model` for `epoch` epochs over `trainloader`.\n",
    "\n",
    "\tLogs the batch loss every `log_step` optimizer steps and prints validation\n",
    "\taccuracy at the end of each epoch.\n",
    "\t\"\"\"\n",
    "\tglobal_step = 0\n",
    "\tfor ep in range(epoch):\n",
    "\t\tmodel.train()\n",
    "\t\tfor batch in trainloader:\n",
    "\t\t\tif torch.cuda.is_available():\n",
    "\t\t\t\tbatch = {k: v.cuda() for k, v in batch.items()}\n",
    "\t\t\toptimizer.zero_grad()\n",
    "\t\t\tloss = model(**batch).loss\n",
    "\t\t\tloss.backward()\n",
    "\t\t\toptimizer.step()\n",
    "\t\t\tif global_step % log_step == 0:\n",
    "\t\t\t\tprint(f\"ep: {ep}, global_step: {global_step}, loss: {loss.item()}\")\n",
    "\t\t\tglobal_step += 1\n",
    "\t\tacc = evaluate()\n",
    "\t\tprint(f\"ep: {ep}, acc: {acc}\")"
   ],
   "outputs": [],
   "execution_count": 11
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step8 模型训练"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-06T06:09:31.968265Z",
     "start_time": "2025-09-06T06:06:44.298060Z"
    }
   },
   "source": [
    "# Fine-tune with the defaults (3 epochs, log every 100 steps).\n",
    "train()"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ep: 0, global_step: 0, loss: 0.9456700086593628\n",
      "ep: 0, global_step: 100, loss: 0.41695377230644226\n",
      "ep: 0, global_step: 200, loss: 0.28310734033584595\n",
      "ep: 0, acc: 0.8622908592224121\n",
      "ep: 1, global_step: 300, loss: 0.5138930082321167\n",
      "ep: 1, global_step: 400, loss: 0.12436880171298981\n",
      "ep: 1, acc: 0.8970398902893066\n",
      "ep: 2, global_step: 500, loss: 0.1453189104795456\n",
      "ep: 2, global_step: 600, loss: 0.2378232628107071\n",
      "ep: 2, acc: 0.877734899520874\n"
     ]
    }
   ],
   "execution_count": 12
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step9 模型预测"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-06T06:13:06.010006Z",
     "start_time": "2025-09-06T06:13:05.982530Z"
    }
   },
   "source": [
    "sen = \"我觉得这家酒店不错，饭很好吃！\"\n",
    "id2_label = {0: \"差评！\", 1: \"好评！\"}\n",
    "model.eval()\n",
    "with torch.inference_mode():\n",
    "\tinputs = tokenizer(sen, return_tensors = \"pt\")\n",
    "\t# Move inputs to GPU only when the model itself was moved there; the\n",
    "\t# previous unconditional .cuda() crashed on CPU-only machines.\n",
    "\tif torch.cuda.is_available():\n",
    "\t\tinputs = {k: v.cuda() for k, v in inputs.items()}\n",
    "\tlogits = model(**inputs).logits\n",
    "\tpred = torch.argmax(logits, dim = -1)\n",
    "\tprint(f\"输入：{sen}\\n模型预测结果:{id2_label.get(pred.item())}\")"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "输入：我觉得这家酒店不错，饭很好吃！\n",
      "模型预测结果:好评！\n"
     ]
    }
   ],
   "execution_count": 13
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-06T06:14:41.007742Z",
     "start_time": "2025-09-06T06:14:41.004378Z"
    }
   },
   "source": [
    "from transformers import pipeline\n",
    "\n",
    "\n",
    "# Expose readable class names through the model config so the pipeline\n",
    "# reports \"好评！\"/\"差评！\" instead of LABEL_0/LABEL_1.\n",
    "model.config.id2label = id2_label\n",
    "# device=0 (first GPU) only when CUDA exists; -1 keeps the pipeline on CPU\n",
    "# instead of raising on machines without a GPU.\n",
    "pipe = pipeline(\"text-classification\", model = model, tokenizer = tokenizer, device = 0 if torch.cuda.is_available() else -1)"
   ],
   "outputs": [],
   "execution_count": 16
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-06T06:14:43.945876Z",
     "start_time": "2025-09-06T06:14:43.925875Z"
    }
   },
   "source": [
    "# End-to-end inference: tokenize -> model -> mapped label with score.\n",
    "pipe(sen)"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'label': '好评！', 'score': 0.9985582232475281}]"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 20
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "transformers",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.16"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
