{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 要点\n",
    "1. Dataset类 `from datasets import Dataset`\n",
    "2. DataLoader类 `from torch.utils.data import DataLoader`\n",
    "3. DataCollatorWithPadding类 `from transformers import DataCollatorWithPadding`\n",
    "4. DatasetDict进行数据集划分 `train_test_split` 然后再分割为 `trainset` 和 `validset`\n",
    "5. 注意自定义评估函数 最后的除数是`len(ds[\"test\"])`"
   ],
   "id": "39f10b5215e5f665"
  },
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "import warnings\n",
    "# Explicit imports instead of `from datasets import *`: avoids namespace\n",
    "# pollution and makes it clear where each name used below comes from.\n",
    "from datasets import load_dataset, load_from_disk, Dataset\n",
    "from torch.utils.data import DataLoader\n",
    "\n",
    "warnings.filterwarnings(\"ignore\")"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "数据加载 & 划分",
   "id": "2546576530dd1801"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "# Download the full DatasetDict (all splits) from the Hugging Face Hub.\n",
    "datasets1 = load_dataset(\"madao33/new-title-chinese\")\n",
    "datasets1"
   ],
   "id": "50675b3fdd8b4e43",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "# Demo: `split=\"train\"` returns a single Dataset instead of a DatasetDict.\n",
    "# datasets2 = load_dataset(\"madao33/new-title-chinese\", split=\"train\")\n",
    "# datasets2, len(datasets2)"
   ],
   "id": "d95b59f6ac96907b",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "# Demo: slice a split by absolute row indices (rows 10..99 of train).\n",
    "# datasets3 = load_dataset(\"madao33/new-title-chinese\", split=\"train[10:100]\")\n",
    "# datasets3"
   ],
   "id": "cd3f02f05ce37e42",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "# Demo: slice a split by percentage (first 50% of train).\n",
    "# datasets4 = load_dataset(\"madao33/new-title-chinese\", split=\"train[:50%]\")\n",
    "# datasets4"
   ],
   "id": "6750a457cc7ac741",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "# Demo: a list of split specs returns a list of Datasets, one per spec.\n",
    "# datasets5 = load_dataset(\"madao33/new-title-chinese\", split=[\"train[:50%]\", \"validation[:10%]\"])\n",
    "# datasets5"
   ],
   "id": "ef72ae0a5dbe2661",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "再次划分数据集",
   "id": "e816942600d29750"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "# Hold out 10% of the train split as a test set.\n",
    "# A fixed seed makes the split reproducible across kernel restarts.\n",
    "resplit_ds = datasets1[\"train\"].train_test_split(test_size=0.1, seed=42)\n",
    "resplit_ds[\"train\"], resplit_ds[\"test\"]"
   ],
   "id": "fe126aec382df645",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "保存和加载数据集",
   "id": "195025245ea08bbd"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "# Round-trip the DatasetDict through disk (Arrow format) and reload it.\n",
    "datasets1.save_to_disk(\"data/processed_data\")\n",
    "ds = load_from_disk(\"data/processed_data\")\n",
    "ds"
   ],
   "id": "1772dafc0afdeb",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "加载自己的数据集",
   "id": "80ed8c578b3eb85a"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "from transformers import AutoTokenizer, DataCollatorWithPadding\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"bert-base-chinese\")\n",
    "\n",
    "# Load the CSV as a single Dataset and drop rows with a missing review text.\n",
    "# ds = load_dataset(\"csv\", data_files=\"data/ChnSentiCorp_htl_all.csv\", split=\"train\")\n",
    "ds = Dataset.from_csv(\"data/ChnSentiCorp_htl_all.csv\")\n",
    "ds = ds.filter(lambda x: x[\"review\"] is not None)\n",
    "\n",
    "def process_function(examples):\n",
    "    \"\"\"Tokenize a batch of reviews and carry the label over as `labels`.\"\"\"\n",
    "    tokenized_examples = tokenizer(examples[\"review\"], truncation=True, max_length=128)\n",
    "    tokenized_examples[\"labels\"] = examples[\"label\"]\n",
    "    return tokenized_examples\n",
    "\n",
    "ds = ds.map(process_function, batched=True, remove_columns=[\"review\", \"label\"])\n",
    "print(ds)\n",
    "\n",
    "# DataCollatorWithPadding pads each batch dynamically to its longest sequence.\n",
    "dl = DataLoader(ds, batch_size=4, shuffle=False, collate_fn=DataCollatorWithPadding(tokenizer=tokenizer))\n",
    "# Peek at a few batches to confirm the padded shape varies per batch.\n",
    "for step, batch in enumerate(dl):\n",
    "    print(batch[\"input_ids\"].size())\n",
    "    if step > 10:\n",
    "        break\n"
   ],
   "id": "95ed6f9666ff0429",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "模型训练",
   "id": "f2ebcc7d1830445d"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "from transformers import AutoTokenizer, AutoModelForSequenceClassification\n",
    "# hfl/rbt3: a 3-layer Chinese RoBERTa; .cuda() requires a GPU to be available.\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"hfl/rbt3\", trust_remote_code=True)\n",
    "model = AutoModelForSequenceClassification.from_pretrained(\"hfl/rbt3\", trust_remote_code=True).cuda()"
   ],
   "id": "8f54abcd4ae62f6c",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "import torch\n",
    "from datasets import Dataset\n",
    "from transformers import DataCollatorWithPadding\n",
    "\n",
    "# Rebuild the dataset from scratch so this training section is self-contained.\n",
    "ds = Dataset.from_csv(\"data/ChnSentiCorp_htl_all.csv\")\n",
    "ds = ds.filter(lambda x: x[\"review\"] is not None)\n",
    "# Fixed seed -> reproducible train/test split across kernel restarts.\n",
    "ds = ds.train_test_split(test_size=0.1, seed=42)\n",
    "\n",
    "def process_function(examples, tokenizer=tokenizer):\n",
    "    \"\"\"Tokenize a batch of reviews; keep the sentiment label as `labels`.\"\"\"\n",
    "    out = tokenizer(examples[\"review\"], truncation=True, max_length=128)\n",
    "    out[\"labels\"] = examples[\"label\"]\n",
    "    return out\n",
    "\n",
    "ds = ds.map(process_function, remove_columns=[\"review\", \"label\"], batched=True, num_proc=4)\n",
    "\n",
    "# Train loader shuffles; valid loader keeps order. Both pad per batch.\n",
    "trainset = DataLoader(ds[\"train\"], batch_size=32, shuffle=True, collate_fn=DataCollatorWithPadding(tokenizer=tokenizer))\n",
    "validset = DataLoader(ds[\"test\"], batch_size=64, shuffle=False, collate_fn=DataCollatorWithPadding(tokenizer=tokenizer))\n",
    "\n",
    "optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)"
   ],
   "id": "1b141a230d8870d2",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "def evaluate():\n",
    "    \"\"\"Run the model over the validation loader and return accuracy.\n",
    "\n",
    "    Renamed from `eval` to avoid shadowing the Python builtin.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    acc_num = 0\n",
    "    with torch.inference_mode():\n",
    "        for batch in validset:\n",
    "            batch = {k: v.cuda() for k, v in batch.items()}\n",
    "            outputs = model(**batch)\n",
    "            pred = outputs.logits.argmax(dim=-1)\n",
    "            acc_num += (pred.long() == batch[\"labels\"].long()).float().sum()\n",
    "    # Divide by the dataset size, not the number of batches.\n",
    "    return acc_num / len(ds[\"test\"])\n",
    "\n",
    "def train(epochs=3, log_step=100):\n",
    "    \"\"\"Basic training loop: periodic loss logging, per-epoch validation.\"\"\"\n",
    "    global_step = 0\n",
    "    for ep in range(epochs):\n",
    "        model.train()\n",
    "        for batch in trainset:\n",
    "            batch = {k: v.cuda() for k, v in batch.items()}\n",
    "            optimizer.zero_grad()\n",
    "            outputs = model(**batch)\n",
    "            outputs.loss.backward()\n",
    "            optimizer.step()\n",
    "            if global_step % log_step == 0:\n",
    "                print(f\"epoch: {ep}, global_step: {global_step}, loss: {outputs.loss.item()}\")\n",
    "            global_step += 1\n",
    "        acc = evaluate()\n",
    "        print(f\"epoch: {ep}, acc: {acc.item()}\")\n",
    "\n",
    "train()"
   ],
   "id": "5b9f571d518d9c91",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "from transformers import pipeline\n",
    "# Map class indices to human-readable labels so pipeline output is meaningful.\n",
    "model.config.id2label = {0: \"差评！\", 1: \"好评！\"}\n",
    "# device=0 runs inference on the first GPU (model was moved there via .cuda()).\n",
    "pipe = pipeline(\"text-classification\", model=model, tokenizer=tokenizer, device=0)\n",
    "\n",
    "sen = \"我觉得这家酒店不错，饭很好吃！\"\n",
    "pipe(sen)"
   ],
   "id": "6b520a5d2753d531",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": "",
   "id": "fc6ea7fa109ec9ec",
   "outputs": [],
   "execution_count": null
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
