{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 170,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 导入相关包\n",
    "import os\n",
    "import torch\n",
    "from transformers import  AutoTokenizer, AutoModelForSequenceClassification\n",
    "import pandas\n",
    "from torch.utils.data import Dataset, DataLoader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 171,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at D:/code/models/huggingface/rbt3 and are newly initialized: ['classifier.bias', 'classifier.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    }
   ],
   "source": [
    "# Load the tokenizer and sequence-classification model from a local checkpoint.\n",
    "# Note: rbt3 has no classification head in the checkpoint, so the classifier\n",
    "# weights are freshly initialized (hence the warning) and must be fine-tuned.\n",
    "tokenizer = AutoTokenizer.from_pretrained('D:/code/models/huggingface/rbt3')\n",
    "model =  AutoModelForSequenceClassification.from_pretrained('D:/code/models/huggingface/rbt3')\n",
    "# Move the model to GPU when one is available\n",
    "if torch.cuda.is_available():\n",
    "    model.cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 172,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "RangeIndex: 7765 entries, 0 to 7764\n",
      "Data columns (total 2 columns):\n",
      " #   Column  Non-Null Count  Dtype \n",
      "---  ------  --------------  ----- \n",
      " 0   label   7765 non-null   int64 \n",
      " 1   review  7765 non-null   object\n",
      "dtypes: int64(1), object(1)\n",
      "memory usage: 121.5+ KB\n"
     ]
    }
   ],
   "source": [
    "# Load the hotel-review dataset (columns: label, review)\n",
    "df =  pandas.read_csv('D:/code/datasets/ChineseNlpCorpus/datasets/ChnSentiCorp_htl_all/ChnSentiCorp_htl_all.csv')\n",
    "# reset_index(drop=True) is required: dropna() leaves gaps in the index labels,\n",
    "# and the Dataset below indexes rows with positional integers 0..len-1 —\n",
    "# without a contiguous index, a dropped row would raise a KeyError at train time.\n",
    "df = df.dropna().reset_index(drop=True)\n",
    "df.info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 173,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "('我住的是特色标间，所谓特色，是有些类似家的感觉。寝具不是单调的白色，是条纹和大格子的，感觉很温馨。圈儿椅的靠垫是卡通的加菲猫头，明黄的颜色有点扎眼，和周围配在一起感觉有点不协调。客房硬件评价3.5分。加湿器、烫衣板、电熨斗、吹风机、小冰箱等俱全。缺点：液晶电视固定在墙上，位置不好。一是遮挡了一个电源插座；二是与桌子搭配不当。桌子（与冰箱柜一体）在电视下方，如果坐在桌子旁上网或工作学习，正好挡住了电视屏幕。错开坐，正好是冰箱柜，腿脚伸不开。',\n",
       " 1)"
      ]
     },
     "execution_count": 173,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Dataset wrapping the (review, label) rows of the DataFrame\n",
    "\n",
    "class Mydataset(Dataset):\n",
    "    \"\"\"Map-style dataset yielding (review_text, label) tuples.\"\"\"\n",
    "\n",
    "    def __init__(self, df):\n",
    "        self.data = df\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)\n",
    "    \n",
    "    def __getitem__(self, index):\n",
    "        # Use positional indexing (.iloc) instead of label-based .loc:\n",
    "        # DataLoader/random_split pass positions 0..len-1, which only match\n",
    "        # index labels if the index happens to be contiguous (e.g. after dropna\n",
    "        # it is not, and .loc would raise KeyError on the dropped labels).\n",
    "        row = self.data.iloc[index]\n",
    "        return row['review'], row['label']\n",
    "    \n",
    "dataset =  Mydataset(df)\n",
    "dataset[10]    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 174,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(6212, 1553)"
      ]
     },
     "execution_count": 174,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Split into train/validation sets (80/20)\n",
    "from torch.utils.data  import random_split\n",
    "# Seed the generator so the split — and therefore every reported accuracy —\n",
    "# is reproducible across kernel restarts.\n",
    "train_dataset, val_dataset = random_split(dataset, [0.8, 0.2], generator=torch.Generator().manual_seed(42))\n",
    "len(train_dataset),len(val_dataset)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 175,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Encoding(num_tokens=128, attributes=[ids, type_ids, tokens, offsets, attention_mask, special_tokens_mask, overflowing])"
      ]
     },
     "execution_count": 175,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build DataLoaders for training and validation\n",
    "import torch\n",
    "from torch.utils.data import DataLoader\n",
    "\n",
    "def collate_func(batch):\n",
    "    \"\"\"Tokenize a list of (review, label) pairs into one padded tensor batch.\"\"\"\n",
    "    texts = [review for review, _ in batch]\n",
    "    labels = [label for _, label in batch]\n",
    "    encoded = tokenizer(texts, padding=True, truncation=True, max_length=128, return_tensors='pt')\n",
    "    encoded['labels'] = torch.tensor(labels)\n",
    "    return encoded\n",
    "\n",
    "train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, collate_fn=collate_func)\n",
    "val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False, collate_fn=collate_func)\n",
    "\n",
    "# Peek at the tokenization of the first sample in the first batch\n",
    "next(iter(train_loader))[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 176,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optimizer: Adam over all model parameters (standard BERT fine-tuning lr)\n",
    "from torch.optim import Adam\n",
    "optimizer = Adam(model.parameters(), lr=2e-5)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 177,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training and evaluation loops\n",
    "def evaluate(epoch=5, log_step=100):\n",
    "    \"\"\"Return accuracy of `model` on the validation set.\n",
    "\n",
    "    Note: `epoch` and `log_step` are unused; they are kept only so the\n",
    "    signature stays backward-compatible with existing callers.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    acc_num = 0\n",
    "    with torch.no_grad():\n",
    "        for batch in val_loader:\n",
    "            if torch.cuda.is_available():\n",
    "                batch = {k: v.cuda() for k, v in batch.items()}\n",
    "            output = model(**batch)\n",
    "            pred = torch.argmax(output.logits, dim=-1)\n",
    "            acc_num += torch.sum(pred.long() == batch['labels'].long()).item()\n",
    "    return acc_num / len(val_dataset)\n",
    "\n",
    "def save_model(model, tokenizer, epoch):\n",
    "    \"\"\"Save model and tokenizer to a per-epoch checkpoint directory.\"\"\"\n",
    "    original_path = \"D:/code/models/classfication_rbt3/\"\n",
    "    path = os.path.join(original_path, 'epoch_{}'.format(epoch))\n",
    "    # exist_ok=True replaces the manual exists()/makedirs() pair and also\n",
    "    # avoids a crash when the notebook is re-run and the directory exists.\n",
    "    os.makedirs(path, exist_ok=True)\n",
    "    model.save_pretrained(path)\n",
    "    tokenizer.save_pretrained(path)\n",
    "\n",
    "def train(epoch=5, log_step=100):\n",
    "    \"\"\"Fine-tune `model` for `epoch` epochs, logging loss every `log_step`\n",
    "    optimization steps; evaluates and checkpoints after each epoch.\n",
    "    \"\"\"\n",
    "    global_step = 0\n",
    "    # Distinct loop variable so the `epoch` argument is not shadowed\n",
    "    # (the original `for epoch in range(epoch)` overwrote the parameter).\n",
    "    for ep in range(epoch):\n",
    "        model.train()\n",
    "        for batch in train_loader:\n",
    "            if torch.cuda.is_available():\n",
    "                batch = {k: v.cuda() for k, v in batch.items()}\n",
    "            optimizer.zero_grad()\n",
    "            output = model(**batch)\n",
    "            output.loss.backward()\n",
    "            optimizer.step()\n",
    "            if global_step % log_step == 0:\n",
    "                print('epoch:{},step:{},loss:{}'.format(ep, global_step, output.loss.item()))\n",
    "            global_step += 1\n",
    "        acc = evaluate()\n",
    "        print('epoch:{},acc:{}'.format(ep, acc))\n",
    "        save_model(model, tokenizer, ep)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 178,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch:0,step:0,loss:0.6780364513397217\n",
      "epoch:0,step:100,loss:0.33629652857780457\n",
      "epoch:0,acc:0.8808757244043787\n"
     ]
    },
    {
     "ename": "NameError",
     "evalue": "name 'os' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[178], line 2\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[38;5;66;03m#模型训练\u001b[39;00m\n\u001b[1;32m----> 2\u001b[0m \u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
      "Cell \u001b[1;32mIn[177], line 40\u001b[0m, in \u001b[0;36mtrain\u001b[1;34m(epoch, log_step)\u001b[0m\n\u001b[0;32m     38\u001b[0m acc \u001b[38;5;241m=\u001b[39m evaluate()\n\u001b[0;32m     39\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mepoch:\u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m,acc:\u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mformat(epoch,acc))\n\u001b[1;32m---> 40\u001b[0m \u001b[43msave_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43mtokenizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43mepoch\u001b[49m\u001b[43m)\u001b[49m\n",
      "Cell \u001b[1;32mIn[177], line 17\u001b[0m, in \u001b[0;36msave_model\u001b[1;34m(model, tokenizer, epoch)\u001b[0m\n\u001b[0;32m     15\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21msave_model\u001b[39m(model,tokenizer,epoch):\n\u001b[0;32m     16\u001b[0m     original_path \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mD:/code/models/classfication_rbt3/\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m---> 17\u001b[0m     path \u001b[38;5;241m=\u001b[39m \u001b[43mos\u001b[49m\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(original_path,\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mepoch_\u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mformat(epoch))\n\u001b[0;32m     18\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mexists(path):\n\u001b[0;32m     19\u001b[0m         os\u001b[38;5;241m.\u001b[39mmakedirs(path)\n",
      "\u001b[1;31mNameError\u001b[0m: name 'os' is not defined"
     ]
    }
   ],
   "source": [
    "# Run training (the NameError in the recorded output predates the `import os`\n",
    "# in the first cell; a fresh Restart-and-Run-All resolves it)\n",
    "train()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
